diff --git a/.codespell.exclude b/.codespell.exclude new file mode 100644 index 00000000..a27d8a60 --- /dev/null +++ b/.codespell.exclude @@ -0,0 +1,45 @@ + echo "succes.label Login success" + echo "succes.draw AREA" +echo "succes.value $success" +Tim Small + accesss => 'NFSPROC3_ACCESS (Check Access Permission)', +# Beginn des modifizierten Skriptes - Beginning of the modified script # + [ /(\d+) packets with ECN ECT\(0\) bit set$/, [ [ :packets, 'ECT(0) bit' ] ] ], + [ /(\d+) packets with ECN ECT\(1\) bit set$/, [ [ :packets, 'ECT(1) bit' ] ] ], + [ /(\d+) ARP entrys? timed out$/, [ [ :entries, 'timed out' ] ] ], +echo -en "recieved.value " + print "require dont.graph.anything [0-9]+\n" +# Informations générales : +# Marge de bruit 5.10 dB 5.60 dB +# Adresse MAC Freebox XX:XX:XX:XX:XX:XX +# Adresse IP 203.0.113.60 +# Adresse IP privée 192.0.2.1 +# Adresse IP DMZ 192.0.2.2 +# Adresse IP Freeplayer 192.0.2.0 +# Adresse MAC Adresse IP +# Linjen som grep'es ut kan se ut som dette: +# Linjen som grep'es ut kan se ut som dette: +VISITORS="$(echo 'munin' | curl --silent -X POST -H 'Content-type: text/xml' -d @- http://api.trafic.ro/rest/0.01/sumar-site/$RID | xmlstarlet sel -t -m "/sumar-site/vizitatori_ultimele_24_ore" -v ".")" +# - Serien Timer werden nun separat gezaehlt (anzahl pro Woche) +# - Timer werden nur ignoriert wenn sie 0(inaktiv) als Status haben + primary muss ein iterable oder StringType sein + secondary muss iterable oder StringType sein + print ("d.label Design voltage\nd.type GAUGE\nd.draw AREA\n"); + if ($str =~ /^(no?|0|f(alse)?)$/i) { +# (Based off the perl munin plugin by Joan Carles Soler) + + +Christian Loos +# - kein div 0 Fehler mehr wenn der Host nicht zu erreichen ist +# - im Namen kann Munin-konform der Hostname mit angegeben werden: vdr_localhost vdr_192.168.0.2, ... (localhost ist default) + print "platte.info Angabe, wieviel der von VDR beschreibbaren Festplatten belegt ist.\n"; + echo 'graph_title Samba Locked Files' + echo 'graph_vlabel number of locked files' + echo 'samba_locked.label Locked Files' + echo 'lock.label Locked files' + echo 'graph_info This graph shows the Memory used by process' + echo 'shares.label shared files' +# Based on Tim Wulkau's script. Thank you! + echo 'graph_title Memory usage by process' + echo 'VmSize.info The size of the virtual memory allocated to the process' +grep -v 'Preparing to download files' | \ diff --git a/.codespell.ignore-words b/.codespell.ignore-words new file mode 100644 index 00000000..67699dd7 --- /dev/null +++ b/.codespell.ignore-words @@ -0,0 +1,5 @@ +cas +conexant +ende +referer +wan diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..4551085d --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +ignore = W503, E203 +max-line-length = 99 diff --git a/.gitignore b/.gitignore index 596cff88..20420e43 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ .*.swp *~ + +__pycache__/ diff --git a/.rubocop.yml b/.rubocop.yml new file mode 100644 index 00000000..31bda550 --- /dev/null +++ b/.rubocop.yml @@ -0,0 +1,7 @@ +# Allow "=begin"/"=end" multiline comments in order to protect munin's +# magic markers ("#%#"). 
+Style/BlockComments: + Enabled: false + +AllCops: + NewCops: enable diff --git a/.travis.yml b/.travis.yml index dd210c31..abad2157 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,61 +1,90 @@ --- -language: perl -install: - - sudo apt-get update - - sudo apt-get --no-install-recommends install devscripts python python3 ruby php5-cli gawk ksh zsh pylint - - sudo apt-get --no-install-recommends install pkg-config libdb-dev libvirt-dev libexpat-dev - # - Munin/Plugin.pm is in "munin-node" on precise - - sudo apt-get --no-install-recommends install munin-node +dist: bionic +addons: + apt: + packages: + - codespell + - devscripts + - python + - python-flake8 + - python3 + - python3-flake8 + - ruby + - php-cli + - gawk + - ksh + - zsh + - pylint + - shellcheck + - rubocop + - pkg-config + - libdb-dev + - libvirt-dev + - libexpat-dev - # Modules used by test script - - cpanm --notest Capture::Tiny + # Munin/Plugin.pm is in "munin-node" on precise + - munin-node + + # Modules used by test script + - libsys-virt-perl + - libcapture-tiny-perl + + # Modules used by plugins + - libberkeleydb-perl + - libcache-memcached-perl + - libgraphics-colornames-www-perl + - libdbd-pg-perl + - libdata-dump-perl + - libdate-manip-perl + - libdatetime-format-iso8601-perl + - libdevice-serialport-perl + - libfile-readbackwards-perl + - libgraphics-colorobject-perl + - libipc-run3-perl + - libipc-sharelite-perl + - libjson-perl + - libjson-any-perl + - libmail-sendmail-perl + - libmodern-perl-perl + - libmoosex-poe-perl + - libnet-dns-perl + - libnet-openssh-perl + - libnet-snmp-perl + - libnet-telnet-perl + - libnet-telnet-cisco-perl + - libpoe-perl + - libpoe-component-irc-perl + - libproc-processtable-perl + - libredis-perl + - libswitch-perl + - libtext-iconv-perl + - libwww-mechanize-perl + - libwww-mechanize-treebuilder-perl + - libyaml-perl + - libxml-libxml-perl + - libxml-simple-perl + - libxml-smart-perl + - libxml-twig-perl + - libexperimental-perl + +before_install: + - cpanm --local-lib=~/perl5 local::lib && eval $(perl -I ~/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest File::Find - cpanm --notest Test::More - # - # Modules used by plugins + # more Modules used by plugins - cpanm --notest Asterisk::AMI - - cpanm --notest BerkeleyDB - - cpanm --notest Cache::Memcached - - cpanm --notest DBD::Pg - - cpanm --notest Data::Dump - - cpanm --notest Date::Manip - cpanm --notest Date::Parse - - cpanm --notest DateTime::Format::ISO8601 - - cpanm --notest Device::SerialPort - cpanm --notest FCGI::Client - - cpanm --notest File::ReadBackwards - - cpanm --notest Graphics::ColorObject - - cpanm --notest IPC::Run3 - - cpanm --notest IPC::ShareLite - - cpanm --notest JSON - - cpanm --notest JSON::Any - - cpanm --notest Mail::Sendmail - - cpanm --notest Modern::Perl - - cpanm --notest MooseX::POE - - cpanm --notest Net::DNS - - cpanm --notest Net::OpenSSH - - cpanm --notest Net::SNMP - - cpanm --notest Net::Telnet - - cpanm --notest Net::Telnet::Cisco - - cpanm --notest POE - cpanm --notest POE::Component::IRC - cpanm --notest POE::Quickie - - cpanm --notest Proc::ProcessTable - - cpanm --notest Redis - - cpanm --notest WWW::Mechanize::TreeBuilder - - cpanm --notest Text::Iconv - - cpanm --notest YAML - - cpanm --notest XML::LibXML - - cpanm --notest XML::Simple - - cpanm --notest XML::Smart - - cpanm --notest XML::Twig - - cpanm --notest nvidia::ml - - cpanm --notest experimental - # - Sys::Virt version matching the test system's libvirt-dev - - cpanm --notest DANBERR/Sys-Virt-0.9.8.tar.gz + # Modules used bu 
plugins, but not compiling from CPAN + # - cpanm --notest nvidia::ml # Modules used by plugins, but missing on cpan # - File::Tail::Multi # - Sun::Solaris::Kstat # - VMware::VIRuntime # - MythTV -script: "PERL5LIB=$PERL5LIB:/usr/share/perl5 prove" + +script: + - make lint + - prove diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..fbdfd286 --- /dev/null +++ b/Makefile @@ -0,0 +1,7 @@ +.PHONY: lint +lint: + # TODO: add "--ignore-words .codespell.ignore-words" as soon as travis supports a newer + # testing environment (containing codespell 0.11 or later). + find plugins/ -type f -not -name "*.png" -not -name "*.conf" -not -name "*.jar" -not -name "*.pyc" -print0 \ + | xargs -0 codespell \ + --exclude-file .codespell.exclude diff --git a/README.md b/README.md index 46578d99..c6a6bdaf 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,18 @@ -This is the repository for all user contributed stuff +This is the repository for all user contributed stuff related to +[munin](http://munin-monitoring.org). + # contrib/plugins/ - 3rd-party plugins **This is usually where you want to begin your journey.** -Here you'll find all the plugins coming from http://exchange.munin-monitoring.org/. -That web site is for the time being disabled, new updates are done here. +Here you'll find a plethora of plugins for the most diverse topics. Please take a look and +improve existing ones or propose new plugins. + +Please read the [hints for plugin contributions](./plugins#contributed-munin-plugins). + +See the [gallery](http://gallery.munin-monitoring.org/) for a browsable overview of these plugins. -If a dedicated website comes back alive, its plugin backend will be this git repo. # contrib/templates/ - 3rd-party templates @@ -15,50 +20,53 @@ Feel free to update templates here, or even to create new ones. Bonus points for mobile-friendly ones :) -Note that the one named `official` is a loose-synced copy of the one in SVN trunk. -It should serves as a base for small editions that can be resynced in SVN trunk, so for that : +Note that the one named `official` is a loose-synced copy of the one distributed with munin. +It should serve as a base for small edits that can be merged into munin, so for that: * don't copy the whole template * directly edit files in this directory + # contrib/tools/ - 3rd-party tools -Here, you can put just any kind of tool. Please use this directory instead of a random place on the internet. +Here, you can put just any kind of tool. Please use this directory instead of a random place on the internet. It makes things way more easy to search for others. -And, it serves as an incubator of SVN `trunk/contrib` :-) # contrib/samples/ - 3rd-party examples of configs This serves as a repository for examples of various configs. You know, the ''learn by example'' way of doing things. + ## Notes to contributors ### Commits, Comments & Pull requests -We like to have _elementary_ commits as it is much easier to manage for reviewing and debugging. +We like to have _elementary_ commits as it is much easier to manage for reviewing and debugging. So please **don't** be afraid to make **as many** commits as needed. Merging many commits is as easy as merging one, if not easier. -A good rationale is that each commit shall have a one-liner commit comment as its first line. +A good rationale is that each commit shall have a one-liner commit comment as its first line. Ideally that first line has a prefix that shows the part the commit is about. 
It makes it very easy to see grouped changes, and it enable avoiding to look at the `--stat`. To know the prefix you should use, you can have a look at already existing commits. Next lines are optional and should only -explain the _why_ it is done this particular way. +explain the _why_ it is done this particular way. On the other side, pull requests can regroup many commits at once. Just try to explain in the pull comment the ''why'' we should merge it (if it's not obvious). Tim Pope wrote a [very nice tuto](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) on making good commit comments. + ### Licenses All the code here is licensed with the same terms as munin itself (GPLv2), unless specified otherwise inside a file. In all cases the code shall have an OSI-compatible license. Asking for a pull implies that you agree with that fact. -This change was made on Jun 1st 2012. If you wrote some code earlier and you do not agree to the new licensing default, you can : +This change was made on Jun 1st 2012. If you wrote some code earlier and you do not agree to the new licensing default, you can: - submit a licensing change pull -- submit a removal pull +- submit a removal pull + # Building status diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 00000000..2885e46a --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,54 @@ +# Contributed Munin Plugins + +This plethora of plugins covering various topics was contributed by many different users of [munin](http://munin-monitoring.org). + +See the [gallery](http://gallery.munin-monitoring.org/) for a browsable overview of these plugins. + + +## Purpose of this repository + +This repository of contributed plugins strives to achieve the following goals: + +* allow users to find interesting plugins +* allow contributors to publish their plugins +* simplify cooperative maintenance of plugins + +Contributed plugins are maintained primarily by their authors. +You may file bug reports for plugin issues here in this repository (`munin-contrib`), but please do not forget to notify the author of the plugin (see the plugin's documentation), too. + +Please note that this repository is not supposed to be a dumping site for random plugins of low quality. The related infrastructure (e.g. the [gallery](http://gallery.munin-monitoring.org/) or automated tests) requires a certain level of quality. Please see below for details. + + +## Submit a new plugin + +1. check if a similar plugin exists and if it can be extended/changed instead of adding a new plugin + * please avoid code copies - they are a maintenance burden +2. add [documentation](http://guide.munin-monitoring.org/en/latest/develop/documenting.html#plugin-documentation) including configuration, author, license and [magic markers](http://guide.munin-monitoring.org/en/latest/architecture/syntax.html#magic-markers) +3. pick a suitable [category](http://guide.munin-monitoring.org/en/latest/reference/graph-category.html) +5. use style check tools for the language of the plugin (e.g. perl: `perlcritic`, shell: `shellcheck`, python: `flake8`, ruby: `rubocop`) +6. pick a suitable [name and location](#Plugin_name_and_location) +7. 
bonus: + * use the [multigraph approach](http://guide.munin-monitoring.org/en/latest/plugin/multigraphing.html#plugin-multigraphing) for non-trivial plugins + * add [example graphs](http://guide.munin-monitoring.org/en/latest/develop/plugins/plugin-gallery.html#rules-for-plugin-contributors) for the [gallery](http://gallery.munin-monitoring.org/) + * support [dirtyconfig](http://guide.munin-monitoring.org/en/latest/plugin/protocol-dirtyconfig.html#plugin-protocol-dirtyconfig) if it is suitable +8. open a [pull request](https://github.com/munin-monitoring/contrib/pull/) with your new plugin or send it attached to an email to the [mailing list](https://lists.sourceforge.net/lists/listinfo/munin-users) + +See the [plugin development documentation](http://guide.munin-monitoring.org/en/latest/develop/plugins/index.html) for more details. + + +## Modify an existing plugin + +* *try* to keep the plugin backwards compatible (e.g. keep data fieldnames unchanged) + * improvements of code quality and features can justify incompatible changes of existing plugins +* bonus: + * improve the existing plugins according to the [wishlist for new plugins](#Submit_a_new_plugin) + * upgrades from simple plugins to a [multigraph plugin](http://guide.munin-monitoring.org/en/latest/plugin/multigraphing.html#plugin-multigraphing) are welcome + + +## Plugin name and location + +The following descriptions are *intentions* - they do not necessarily describe the current state for all plugins. Please open a [pull request](https://github.com/munin-monitoring/contrib/pull/) if you want to align the current structure along the goals outlined below: + +* the top level directory should describe a related *software* or *vendor* + * use *concepts* or *platforms* only if it is really necessary (e.g. *cpu*, *bsd*, *memory*) +* subdirectories are usually not required diff --git a/plugins/accounting/accounting_ b/plugins/accounting/accounting_ old mode 100644 new mode 100755 index f6fd2116..526b69f9 --- a/plugins/accounting/accounting_ +++ b/plugins/accounting/accounting_ @@ -115,28 +115,26 @@ fi if [ "$1" == "autoconf" ]; then if [ -r /proc/net/dev ]; then - $IPTABLES -L INPUT -v -n -x >/dev/null 2>/dev/null - if [ $? -gt 0 ]; then - echo "no (could not run iptables as user `whoami`)" - exit 1 - else - echo yes - exit 0 + $IPTABLES -L INPUT -v -n -x -w >/dev/null 2>/dev/null + if [ $? 
-gt 0 ]; then + echo "no (could not run iptables as user `whoami`)" + else + echo yes fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then if [ $PROTO = "ipv4" ]; then - $IPTABLES -L INPUT -v -x -n 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv4_\1/p' - $IPTABLES -L OUTPUT -v -x -n 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv4_\1/p' + $IPTABLES -L INPUT -v -x -n -w 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv4_\1/p' + $IPTABLES -L OUTPUT -v -x -n -w 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv4_\1/p' elif [ $PROTO == "ipv6" ]; then - $IPTABLES -L INPUT -v -x -n 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv6_\1/p' - $IPTABLES -L OUTPUT -v -x -n 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv6_\1/p' + $IPTABLES -L INPUT -v -x -n -w 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv6_\1/p' + $IPTABLES -L OUTPUT -v -x -n -w 2>/dev/null | sed -n 's/^.*\/\* ACCT\-\([a-zA-Z\-]*\) \*\/.*$/\ipv6_\1/p' fi exit 0 @@ -192,12 +190,12 @@ if [ "$1" = "config" ]; then fi; echo 'multigraph '${0##*/}'_in' -$IPTABLES -L INPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-tcp\-in \*\/" | tr -s '*' '-' | awk "{ print \"tcpIN.value \" \$2 }" -$IPTABLES -L INPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-udp\-in \*\/" | tr -s '*' '-' | awk "{ print \"udpIN.value \" \$2 }" -$IPTABLES -L INPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-icmp\-in \*\/" | tr -s '*' '-' | awk "{ print \"icmpIN.value \" \$2 }" +$IPTABLES -L INPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-tcp\-in \*\/" | tr -s '*' '-' | awk "{ print \"tcpIN.value \" \$2 }" +$IPTABLES -L INPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-udp\-in \*\/" | tr -s '*' '-' | awk "{ print \"udpIN.value \" \$2 }" +$IPTABLES -L INPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-icmp\-in \*\/" | tr -s '*' '-' | awk "{ print \"icmpIN.value \" \$2 }" echo echo 'multigraph '${0##*/}'_out' -$IPTABLES -L OUTPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-tcp\-out \*\/" | tr -s '*' '-' | awk "{ print \"tcpOUT.value \" \$2 }" -$IPTABLES -L OUTPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-udp\-out \*\/" | tr -s '*' '-' | awk "{ print \"udpOUT.value \" \$2 }" -$IPTABLES -L OUTPUT -v -n -x | grep "\/\* ACCT\-"$SUBCHAIN"\-icmp\-out \*\/" | tr -s '*' '-' | awk "{ print \"icmpOUT.value \" \$2 }" +$IPTABLES -L OUTPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-tcp\-out \*\/" | tr -s '*' '-' | awk "{ print \"tcpOUT.value \" \$2 }" +$IPTABLES -L OUTPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-udp\-out \*\/" | tr -s '*' '-' | awk "{ print \"udpOUT.value \" \$2 }" +$IPTABLES -L OUTPUT -v -n -x -w | grep "\/\* ACCT\-"$SUBCHAIN"\-icmp\-out \*\/" | tr -s '*' '-' | awk "{ print \"icmpOUT.value \" \$2 }" diff --git a/plugins/amavis/amavis-debian b/plugins/amavis/amavis-debian index ef73e689..47d3c7b1 100755 --- a/plugins/amavis/amavis-debian +++ b/plugins/amavis/amavis-debian @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the amavis mail filter for Debian # (based upon a plugin authored by Geoffroy Desvernay) # @@ -32,16 +32,15 @@ BC=${bc:-`which bc`} mktempfile () { mktemp -} +} if [ "$1" = "autoconf" ]; then if [ -f "${AMAVIS_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" -a -n "${BC}" -a -x "${BC}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -96,7 +95,7 @@ then virus=`grep 'INFECTED' ${TEMP_FILE} | wc -l` spam=`grep 'Blocked SPAM' ${TEMP_FILE} | wc -l` 
other=`echo ${total}-${clean}-${virus}-${other}-${spam} | ${BC}` - + /bin/rm -f $TEMP_FILE fi diff --git a/plugins/amavis/amavis_ b/plugins/amavis/amavis_ index c27d9cb2..2287f4a4 100755 --- a/plugins/amavis/amavis_ +++ b/plugins/amavis/amavis_ @@ -45,11 +45,10 @@ my($db_home) = # DB databases directory if ($ARGV[0] and $ARGV[0] eq "autoconf") { if (-x "/usr/sbin/amavisd-agent") { print "yes\n"; - exit 0; } else { print "no (/usr/sbin/amavisd-agent not found or not executable)\n"; - exit 1; } + exit 0; } elsif ($ARGV[0] and $ARGV[0] eq "suggest") { print "time\n"; print "cache\n"; diff --git a/plugins/amavis/amavis_awk b/plugins/amavis/amavis_awk index 154a8d7c..7a3741fc 100755 --- a/plugins/amavis/amavis_awk +++ b/plugins/amavis/amavis_awk @@ -2,7 +2,7 @@ # # Plugin to monitor Amavis virus and spam statistics. # -# +# # Based on a routine by William Towle # Uncomment the cdef lines to convert the graph to mails/minute # Comment out the line "total.graph no" to show the total on the graph. This may not be aesthetically pleasing. @@ -23,11 +23,10 @@ STATEFILE=$MUNIN_PLUGSTATE/amavis.offset if [ "$1" = "autoconf" ]; then if [ -f "${MAIL_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -38,7 +37,7 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000 -l 0' echo 'graph_order clean p_spam b_spam virus total' - + echo 'clean.min 0' echo 'clean.type ABSOLUTE' #echo 'clean.cdef clean,60,*' @@ -70,15 +69,13 @@ fi $LOGTAIL ${MAIL_LOG} $STATEFILE | \ -awk 'BEGIN { na= 0; nb= 0; nc= 0; nd= 0; total= 0 } +awk 'BEGIN { clean_count=0; passed_spam_count=0; blocked_spam_count=0; infected_count=0; total=0 } - { - - if (index($0, "Passed CLEAN")) { na++ ; total++ } - else if (index($0, "Passed SPAMMY")) { nb++ ; total++ } - else if (index($0, "Blocked SPAMMY")) { nc++ ; total++ } - else if (index($0, "INFECTED")) { nd++ ; total++ } + { + + if (index($0, "Passed CLEAN")) { clean_count++ ; total++ } + else if (index($0, "Passed SPAMMY")) { passed_spam_count++ ; total++ } + else if (index($0, "Blocked SPAMMY")) { blocked_spam_count++ ; total++ } + else if (index($0, "INFECTED")) { infected_count++ ; total++ } } - END { print "clean.value " na"\np_spam.value " nb"\nb_spam.value " nc"\nvirus.value " nd"\ntotal.value " total }' - - + END { print "clean.value " clean_count"\np_spam.value " passed_spam_count"\nb_spam.value " blocked_spam_count"\nvirus.value " infected_count"\ntotal.value " total }' diff --git a/plugins/amr/amr b/plugins/amr/amr index 75b981b4..c200ccec 100755 --- a/plugins/amr/amr +++ b/plugins/amr/amr @@ -54,14 +54,13 @@ sub autoconf if (-d $LOGDIR) { if (-f $logfile) { print "yes\n"; - exit 0; } else { print "no (logfile not found)\n"; } } else { print "no (could not find logdir)\n"; } - exit 1; + exit 0; } sub config @@ -134,7 +133,7 @@ sub parse while (<$log>) { # \d protects us against HTML injection here, be careful when changing - if (m,SCM:{ID:(\d+) +.* +Consumption: +(\d+) +,) { + if (m,SCM:\{ID:(\d+) +.* +Consumption: +(\d+) +,) { $stations{$1} = $2; $signals{$1}++; } diff --git a/plugins/amule/amule_queue b/plugins/amule/amule_queue index 649280bb..e73d37b0 100755 --- a/plugins/amule/amule_queue +++ b/plugins/amule/amule_queue @@ -1,13 +1,13 @@ #!/bin/sh # # Plugin to monitor the number of clients in the the aMule queue. -# +# # In order to use this plugin, you need to enable the "Online Signature" feature # available in aMule's configuration options. 
You will also need to tell aMule to save # the signature file in '/tmp' . # For detailed instructions with screenshots, see http://linux.andreagozzi.com/content/munin_stuff.php # -# +# # Parameters understood: # # config (required) @@ -28,16 +28,15 @@ if [ "$1" = "autoconf" ]; then if [ -z "$(which amule)" ]; then - echo "$0: error: amule not installed" - exit 1 + echo "no (amule not installed)" else if [ ! -e /tmp/amulesig.dat ]; then - echo "$0: error: amulesig.dat not found" + echo "no (amulesig.dat not found)" else echo yes - exit 0 fi fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/amule/amule_shares b/plugins/amule/amule_shares index 0b569cf0..c0129e35 100755 --- a/plugins/amule/amule_shares +++ b/plugins/amule/amule_shares @@ -1,13 +1,13 @@ #!/bin/sh # # Plugin to monitor the number of shared files with the aMule ed2k/KAD client. -# +# # In order to use this plugin, you need to enable the "Online Signature" feature # available in aMule's configuration options. You will also need to tell aMule to save # the signature file in '/tmp' . # For detailed instructions with screenshots, see http://linux.andreagozzi.com/content/munin_stuff.php # -# +# # Parameters understood: # # config (required) @@ -28,16 +28,15 @@ if [ "$1" = "autoconf" ]; then if [ -z "$(which amule)" ]; then - echo "$0: error: amule not installed" - exit 1 + echo "no (amule not installed)" else if [ ! -e /tmp/amulesig.dat ]; then - echo "$0: error: amulesig.dat not found" + echo "no (amulesig.dat not found)" else echo yes - exit 0 fi fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/amule/amule_transfers b/plugins/amule/amule_transfers index cae83ab0..79623d66 100755 --- a/plugins/amule/amule_transfers +++ b/plugins/amule/amule_transfers @@ -1,13 +1,13 @@ #!/bin/sh # # Plugin to monitor the UL/DL speed of the aMule ed2k/KAD client. -# +# # In order to use this plugin, you need to enable the "Online Signature" feature # available in aMule's configuration options. You will also need to tell aMule to save # the signature file in '/tmp' . # For detailed instructions with screenshots, see http://linux.andreagozzi.com/content/munin_stuff.php # -# +# # Parameters understood: # # config (required) @@ -28,16 +28,15 @@ if [ "$1" = "autoconf" ]; then if [ -z "$(which amule)" ]; then - echo "$0: error: amule not installed" - exit 1 + echo "no (amule not installed)" else if [ ! -e /tmp/amulesig.dat ]; then - echo "$0: error: amulesig.dat not found" + echo "no (amulesig.dat not found)" else echo yes - exit 0 fi fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/amule/amule_uptime b/plugins/amule/amule_uptime index 9931a1ea..c58114d8 100755 --- a/plugins/amule/amule_uptime +++ b/plugins/amule/amule_uptime @@ -1,13 +1,13 @@ #!/bin/sh # # Plugin to monitor the current aMule ed2k/KAD client uptime. -# +# # In order to use this plugin, you need to enable the "Online Signature" feature # available in aMule's configuration options. You will also need to tell aMule to save # the signature file in '/tmp' . # For detailed instructions with screenshots, see http://linux.andreagozzi.com/content/munin_stuff.php # -# +# # Parameters understood: # # config (required) @@ -28,16 +28,15 @@ if [ "$1" = "autoconf" ]; then if [ -z "$(which amule)" ]; then - echo "$0: error: amule not installed" - exit 1 + echo "no (amule not installed)" else if [ ! 
-e /tmp/amulesig.dat ]; then - echo "$0: error: amulesig.dat not found" + echo "no (amulesig.dat not found)" else echo yes - exit 0 fi fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/apache/apache_activity b/plugins/apache/apache_activity index 99e0e966..a1dfc3fa 100755 --- a/plugins/apache/apache_activity +++ b/plugins/apache/apache_activity @@ -42,20 +42,20 @@ my %chars = ( if (exists $ARGV[0] and $ARGV[0] eq "autoconf") { if ($ret) { print "no ($ret)\n"; - exit 1; + exit 0; } my $ua = LWP::UserAgent->new(timeout => 30); my @badports; - + foreach my $port (@PORTS) { my $url = sprintf $URL, $port; my $response = $ua->request(HTTP::Request->new('GET',$url)); push @badports, $port unless $response->is_success and $response->content =~ /Scoreboard/im; } - + if (@badports) { print "no (no apache server-status on ports @badports)\n"; - exit 1; + exit 0; } else { print "yes\n"; exit 0; @@ -75,7 +75,7 @@ if (exists $ARGV[0] and $ARGV[0] eq "config") { print $val, "\n"; print "activity_${port}_${char}.type GAUGE\n"; } - } + } exit 0; } diff --git a/plugins/apache/apache_average_time_last_n_requests b/plugins/apache/apache_average_time_last_n_requests old mode 100644 new mode 100755 index 1a32e602..3524ab02 --- a/plugins/apache/apache_average_time_last_n_requests +++ b/plugins/apache/apache_average_time_last_n_requests @@ -3,7 +3,7 @@ # Raphaël Droz - 2016-01-08 # # Monitors the average time requests matching a custom regexp takes -# For instance monitor time execution of files in http://example.com/foo/bar, +# For instance monitor time execution of files in http://example.com/foo/bar, # requests from google, images etc. # # Simply add an entry in the 'type' hashref and modify the description fields @@ -13,7 +13,7 @@ # # NOTE: You need to add a field in your Apache logs showing time executed. # This is normally done using the %T (seconds) or %D (microseconds) -# For instance: +# For instance: # LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %T %v" # Check http://httpd.apache.org/docs/2.2/mod/mod_log_config.html#formats for more info # @@ -62,14 +62,14 @@ my $types = { # any kind of request total => { munin_fields => { - label => 'All requests', + label => 'All requests', draw => 'LINE2', info => 'Average seconds per any request', }, sum => 0, lines => 0, - matches => sub { - return 1; + matches => sub { + return 1; }, }, @@ -82,11 +82,11 @@ my $types = { }, sum => 0, lines => 0, - matches => sub { - my ($fields) = @_; - my $script; - ($script = $fields->[6]) =~ s/\?.*\z //mx; - return $script =~ m{ \.(png|jpe?g|gif|tiff|ilbm|tga) \z }mx; + matches => sub { + my ($fields) = @_; + my $script; + ($script = $fields->[6]) =~ s/\?.*\z //mx; + return $script =~ m{ \.(png|jpe?g|gif|tiff|ilbm|tga) \z }mx; }, }, }; @@ -101,7 +101,7 @@ if (@ARGV && $ARGV[0] eq 'config') { } } exit(0); -} +} my $config_file = `ls -1 $ACCESS_LOG_PATTERN | tail -n 1`; @@ -121,13 +121,13 @@ foreach my $line (@lines) { $types->{$type}->{'lines'}++; } } -} +} } foreach my $type (keys %{$types}) { my $value = $types->{$type}->{'lines'} ? 
$types->{$type}->{'sum'} / $types->{$type}->{'lines'} : 'U'; printf "%s.value %s\n", ($type, $value); -} +} diff --git a/plugins/apache/apache_blackbox b/plugins/apache/apache_blackbox index 1346aa4e..e5d3c901 100755 --- a/plugins/apache/apache_blackbox +++ b/plugins/apache/apache_blackbox @@ -5,10 +5,10 @@ # luis peralta - luis@11870.com # http://www.ziritione.org # -# Installing: configure apache blackbox and set the logfile to /var/log/blackbox.log +# Installing: configure apache blackbox and set the logfile to /var/log/blackbox.log # or change the BLACKBOXLOG setting below. # -# Dependencies: apache mod_logio, apache blackbox +# Dependencies: apache mod_logio, apache blackbox # http://www.devco.net/archives/2008/03/05/detailed_apache_stats.php # # Last version available at: http://www.ziritione.org/http_status @@ -23,13 +23,13 @@ use strict; -my $BLACKBOXLOG = "/var/log/blackbox.log"; +my $BLACKBOXLOG = "/var/log/blackbox.log"; -my %WANTED = ( "apache.status.200" => "_200", - "apache.status.301" => "_301", - "apache.status.302" => "_302", - "apache.status.404" => "_404", - "apache.status.5xx" => "_5xx", +my %WANTED = ( "apache.status.200" => "_200", + "apache.status.301" => "_301", + "apache.status.302" => "_302", + "apache.status.404" => "_404", + "apache.status.5xx" => "_5xx", ); my $arg = shift(); @@ -78,7 +78,7 @@ graph_total total\n"); ); $num++; } - + } diff --git a/plugins/apache/apache_byprojects/README.md b/plugins/apache/apache_byprojects/README.md index 40ff2f29..736ecae1 100644 --- a/plugins/apache/apache_byprojects/README.md +++ b/plugins/apache/apache_byprojects/README.md @@ -2,15 +2,15 @@ Those plugins are used to monitor different projects or vhost (i.e. either different log files or using regular expression as filters) on the same web server. ## munin_byprojects_access -Count the number of hits per projects/vhost. +Count the number of hits per projects/vhost. ![byproject_access](https://www.mantor.org/~northox/misc/munin-plugins/nginx_byprojects_access1-month.png "byproject_access") ## munin_byprojects_bandwidth -Count the total bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required. +Count the total bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required. ![byproject_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_bandwidth-month.png "byproject_bandwidth") ## munin_byprojects_inout_bandwidth -Counts the in/out bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required. +Counts the in/out bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required. ![byproject_inout_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_inout_bandwidth-month.png "byproject_inout_bandwidth") ## Installation diff --git a/plugins/apache/apache_byprojects/byprojects_access b/plugins/apache/apache_byprojects/byprojects_access old mode 100644 new mode 100755 index a6dc444b..26b47745 --- a/plugins/apache/apache_byprojects/byprojects_access +++ b/plugins/apache/apache_byprojects/byprojects_access @@ -3,16 +3,16 @@ use strict; # # byprojects_access # -# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files +# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files # and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. 
# # You need logtail (https://www.fourmilab.ch/webtools/logtail/) # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -74,7 +74,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_access.state'; - open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or die "Can't open $logtail : $!"; while () { my $buf = $_; diff --git a/plugins/apache/apache_byprojects/byprojects_bandwidth b/plugins/apache/apache_byprojects/byprojects_bandwidth old mode 100644 new mode 100755 index 5f6c5372..06546922 --- a/plugins/apache/apache_byprojects/byprojects_bandwidth +++ b/plugins/apache/apache_byprojects/byprojects_bandwidth @@ -3,10 +3,10 @@ use strict; # # byprojects_bandwidth # -# Perl script to monitor total bandwidth *byprojects* (e.g. vhost) from multiple +# Perl script to monitor total bandwidth *byprojects* (e.g. vhost) from multiple # files and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. # @@ -17,7 +17,7 @@ use strict; # "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" # where %I is input and %O is output. # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -81,7 +81,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_totalbandwidth.state'; - open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or die "Can't open $logtail : $!"; while () { my $buf = $_; diff --git a/plugins/apache/apache_byprojects/byprojects_inout_bandwidth b/plugins/apache/apache_byprojects/byprojects_inout_bandwidth old mode 100644 new mode 100755 index 5722ce52..e08bb006 --- a/plugins/apache/apache_byprojects/byprojects_inout_bandwidth +++ b/plugins/apache/apache_byprojects/byprojects_inout_bandwidth @@ -3,10 +3,10 @@ use strict; # # byprojects_inout_bandwidth # -# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from +# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from # multiple files and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. # @@ -17,7 +17,7 @@ use strict; # "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" # where %I is input and %O is output. # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -84,7 +84,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_inoutbandwidth.state'; - open(LT, "$logtail -f ".$log->{'path'}." 
-o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or die "Can't open $logtail : $!"; while () { my $buf = $_; diff --git a/plugins/apache/apache_memmory b/plugins/apache/apache_memmory deleted file mode 100755 index 10d89b3b..00000000 --- a/plugins/apache/apache_memmory +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/sh -# -*- sh -*- - -: << =cut - -=head1 NAME - -apache_memmory -Indicate the medium size of all the apache child process - -=head1 CONFIGURATION - -[apache_*] -env.apuser user_running_apache -env.binname apache_binary_name - - -=head1 AUTHOR - -Ricardo Fraile - -=head1 LICENSE - -GPLv2 - -=head1 MAGICK MARKERS - - #%# family=auto - #%# capabilities=autoconf - -=cut - -. $MUNIN_LIBDIR/plugins/plugin.sh - -USR=$apuser -PROCS=$binname - - -if [ "$1" = "autoconf" ]; then - echo yes - exit 0 -fi - -if [ "$1" = "config" ]; then - - echo 'graph_title Medium size of apache child process.' - echo 'graph_args --base 1000 -l 0 ' - echo 'graph_vlabel Kb' - echo 'graph_scale no' - echo 'graph_category webserver' - echo 'graph_info Indicate the memdium size of all the apache child process.' - - - - echo "servers.label servers" - echo "servers.type GAUGE" - echo "servers.min 0" - - exit 0 -fi - -VAL1=`ps auxf | grep ${PROCS} | grep ^${USR} | grep -v grep | wc -l` - -VAL2=`ps auxf | grep ${PROCS} | grep ^${USR} | grep -v grep | awk '{s+=$6} END {print s}'` - -VAL3=`expr $VAL2 / $VAL1` - -echo "servers.value $VAL3" - - diff --git a/plugins/apache/apache_memory b/plugins/apache/apache_memory new file mode 100755 index 00000000..4e5f84bb --- /dev/null +++ b/plugins/apache/apache_memory @@ -0,0 +1,66 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 NAME + +apache_memory - Indicate the medium size of all the apache child process + +=head1 CONFIGURATION + +[apache_*] +env.apuser user_running_apache (default: "www-data") +env.binname apache_binary_name (default: "apache2") + + +=head1 AUTHOR + +Ricardo Fraile + +=head1 LICENSE + +GPLv2 + +=head1 MAGICK MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +USR=${apuser:-www-data} +PROCS=${binname:-apache2} + + +if [ "$1" = "autoconf" ]; then + echo yes + exit 0 +fi + +if [ "$1" = "config" ]; then + + echo 'graph_title Average size of apache child processes' + echo 'graph_args --base 1024 -l 0 ' + echo 'graph_vlabel Bytes' + echo 'graph_scale no' + echo 'graph_category webserver' + echo 'graph_info Indicate the memdium size of all the apache child process.' + + echo "servers.label servers" + echo "servers.type GAUGE" + echo "servers.min 0" + + exit 0 +fi + +matched_processes=$(ps auxf | grep -- "$PROCS" | grep "^$USR" | grep -v grep) +if [ -n "$matched_processes" ]; then + average_memory=$(printf '%s' "$matched_processes" | awk '{count+=1; sum+=$6} END {print sum / count * 1024}') +else + average_memory="U" +fi + +echo "servers.value $average_memory" diff --git a/plugins/apache/apache_servers b/plugins/apache/apache_servers index 04238fdb..2ec1cdb1 100755 --- a/plugins/apache/apache_servers +++ b/plugins/apache/apache_servers @@ -5,13 +5,13 @@ =head1 NAME -apache_servers -Indicate the number of apache servers running (child process) +apache_servers - Indicate the number of apache servers running (child process) =head1 CONFIGURATION [apache_*] -env.apuser user_runnin_apache -env.binname apache_binary_name +env.apuser user_running_apache (default: "www-data") +env.binname apache_binary_name (default: "apache2") =head1 AUTHOR @@ -28,37 +28,32 @@ GPLv2 =cut -. 
$MUNIN_LIBDIR/plugins/plugin.sh +. "$MUNIN_LIBDIR/plugins/plugin.sh" -USR=$apuser -PROCS=$binname +USR=${apuser:-www-data} +PROCS=${binname:-apache2} if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi if [ "$1" = "config" ]; then - echo 'graph_title Number of apache servers running.' - echo 'graph_args --base 1000 -l 0 ' - echo 'graph_vlabel servers' - echo 'graph_scale no' - echo 'graph_category webserver' - echo 'graph_info Indicate the number of apache servers running (child process).' + echo 'graph_title Number of apache servers running' + echo 'graph_args --base 1000 -l 0 ' + echo 'graph_vlabel servers' + echo 'graph_scale no' + echo 'graph_category webserver' + echo 'graph_info Indicate the number of apache servers running (child process).' + echo "servers.label servers" + echo "servers.type GAUGE" + echo "servers.min 0" + echo "servers.info Number of apache processes" - - echo "servers.label servers" - echo "servers.type GAUGE" - echo "servers.min 0" - echo "servers.info I/O on nfs" - - exit 0 + exit 0 fi -VAL1=`ps auxf | grep ${PROCS} | grep ^${USR} | grep -v grep | wc -l` - -echo "servers.value $VAL1" - - +process_count=$(ps auxf | grep -- "$PROCS" | grep "^$USR" | grep -v grep | wc -l) +echo "servers.value $process_count" diff --git a/plugins/apache/apache_smaps b/plugins/apache/apache_smaps index 3f063c12..905fe471 100755 --- a/plugins/apache/apache_smaps +++ b/plugins/apache/apache_smaps @@ -8,7 +8,7 @@ # to Apache2::SizeLimit. # Author: Kjetil Kjernsmo , based on work by William Viker -# Copyright (C) 2007 Opera Software ASA +# Copyright (C) 2007 Opera Software ASA # # Contibutors: Earle Nietzel # @@ -37,7 +37,7 @@ if (!eval "require Linux::Smaps;") { my $PNAME = exists $ENV{'pname'} ? $ENV{'pname'} : "httpd"; my $PUSER = exists $ENV{'puser'} ? $ENV{'puser'} : "apache"; -if (defined(@ARGV) && ($ARGV[0] eq 'config')) { +if (@ARGV && ($ARGV[0] eq 'config')) { print "graph_title Apache Smaps\n"; print "graph_args --base 1024 -l 0\n"; print "graph_vlabel Bytes\n"; diff --git a/plugins/apache/apache_status b/plugins/apache/apache_status index 9ca3f043..a17c4043 100755 --- a/plugins/apache/apache_status +++ b/plugins/apache/apache_status @@ -144,7 +144,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { if ($response->content =~ /^Total Accesses:/im ) { next; } else { - print "no (ExtendedStatus option for apache" + print "no (ExtendedStatus option for apache" . " mod_status is missing on port $port)\n"; exit 0; } diff --git a/plugins/apache/apache_threads b/plugins/apache/apache_threads index a231b392..f7c8fb6a 100755 --- a/plugins/apache/apache_threads +++ b/plugins/apache/apache_threads @@ -32,7 +32,7 @@ GPLv2 if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -81,8 +81,3 @@ done echo "threads.value `echo $((SUM / $COUNT))`" - - - - - diff --git a/plugins/apache/apache_tmemmory b/plugins/apache/apache_tmemmory index 3617c753..fcb26d4f 100755 --- a/plugins/apache/apache_tmemmory +++ b/plugins/apache/apache_tmemmory @@ -5,12 +5,13 @@ =head1 NAME -apache_tmemmory -Indicate the total memory used by apache +apache_tmemmory - Indicate the total memory used by apache =head1 CONFIGURATION [apache_*] -env.binname apache_binary_name +env.apuser user_running_apache (default: "www-data") +env.binname apache_binary_name (default: "apache2") =head1 AUTHOR @@ -27,38 +28,32 @@ GPLv2 =cut -. $MUNIN_LIBDIR/plugins/plugin.sh +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" - -USR=$apuser -PROCS=$binname +USR=${apuser:-www-data} +PROCS=${binname:-apache2} if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + echo yes + exit 0 fi if [ "$1" = "config" ]; then - echo 'graph_title Total memory used by apache' - echo 'graph_args --base 1000 -l 0 ' - echo 'graph_vlabel Mb' - echo 'graph_scale no' - echo 'graph_category webserver' - echo 'graph_info Indicate the total memory used by apache.' + echo 'graph_title Total memory used by apache' + echo 'graph_args --base 1024 -l 0 ' + echo 'graph_vlabel bytes' + echo 'graph_scale no' + echo 'graph_category webserver' + echo 'graph_info Indicate the total memory used by apache.' - echo "servers.label servers" - echo "servers.type GAUGE" - echo "servers.min 0" + echo "servers.label servers" + echo "servers.type GAUGE" + echo "servers.min 0" - exit 0 + exit 0 fi -VAL1=`ps auxf | grep ${PROCS} | grep -v grep | awk '{s+=$6} END {print s}'` - -VAL2=`expr $VAL1 / 1024` - -echo "servers.value $VAL2" - - +total_memory=$(ps auxf | grep -- "$PROCS" | grep "^$USR" | grep -v grep | awk '{s+=$6} END {print s * 1024}') +echo "servers.value $total_memory" diff --git a/plugins/apache/apache_users b/plugins/apache/apache_users index 4193a1af..257e8fa1 100755 --- a/plugins/apache/apache_users +++ b/plugins/apache/apache_users @@ -23,16 +23,13 @@ then if ! ls $ACCESSLOG > /dev/null then echo "no (could not find apache access log \"$ACCESSLOG\")" - exit 1 elif ! ls $DIRECTORY > /dev/null then echo "no (could not find munin plugins directory \"$DIRECTORY\")" - exit 2 - else echo "yes" - exit 0 fi + exit 0 fi # ######################################################################################### INIT @@ -100,7 +97,7 @@ do else USERNAME[$I]="anonymous" fi - NAME[$I]="_${USERNAME[I]}" # Output sort order + NAME[$I]="_${USERNAME[I]}" # Output sort order else NAME[$I]="${USERNAME[I]}" fi @@ -140,7 +137,7 @@ then FILENAMES=$( find $DIRECTORY -type f -not -wholename $TIMESTAMP | sort) awk '{ printf "%s.label %s\n%s.draw AREA\n", $1, $3, $1 }' $( echo "$FILENAMES" | head -n1 ) - + for FILENAME in $( echo "$FILENAMES" | tail -n+2) do awk '{ printf "%s.label %s\n%s.draw STACK\n", $1, $3, $1 }' $FILENAME diff --git a/plugins/apache/apache_vhosts/apache_logparser b/plugins/apache/apache_vhosts/apache_logparser old mode 100644 new mode 100755 index 1f427b5e..1da00230 --- a/plugins/apache/apache_vhosts/apache_logparser +++ b/plugins/apache/apache_vhosts/apache_logparser @@ -95,7 +95,7 @@ sub count { #find sitename $file=~s/$site/$1/; $file=$vhost if $vhost; - + # skip broken lines next unless $file; @@ -103,7 +103,7 @@ sub count { my $vpm=clean_fieldname("$file"); $temp{$vpm}{'label'}="$file"; $temp{$vpm}{'label'}=~s/www\.//; - + # count all requests $temp{$vpm}{'requests'}++; @@ -118,13 +118,13 @@ sub count { # average bytes $temp{$vpm}{'avg_bytes'}=$temp{$vpm}{'bytes'}/$temp{$vpm}{'requests'} || 0; } - + # count by status / error code $temp{$vpm}{"status"}{$status}++ if $status; if ($time) { # microsec to millisec - $time=sprintf("%d",$time/1000); + $time=sprintf("%d",$time/1000); # min/max execution time $temp{$vpm}{'max_time'}=max($temp{$vpm}{'max_time'},$time) || 0; @@ -144,9 +144,9 @@ while (1) { # tail files, calls &count with linearray $tail->read; - # begin transaction + # begin transaction $share->lock(LOCK_EX); - + # get data (may be updated by other loggers too) my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run @@ -182,7 +182,7 @@ while (1) { 
$share->store( freeze \%old ); # end transaction $share->unlock; - + # parse/write every n seconds (plus processing time) sleep $nsec; } diff --git a/plugins/apache/apache_vhosts/apache_pipelogger b/plugins/apache/apache_vhosts/apache_pipelogger old mode 100644 new mode 100755 index 09e39617..5bdf9188 --- a/plugins/apache/apache_vhosts/apache_pipelogger +++ b/plugins/apache/apache_vhosts/apache_pipelogger @@ -40,12 +40,12 @@ while () { # sanity check next unless m/^([\d\w\.\-_]+\s){5}([\d\w\.\-_]+$)/; # escaped "." and "-" - + # sitename to munin fieldname my $vpm=clean_fieldname($vhost); $temp{$vpm}{'label'}=$vhost; $temp{$vpm}{'label'}=~s/www\.//; - + # count all requests $temp{$vpm}{'requests'}++; @@ -60,7 +60,7 @@ while () { # average bytes $temp{$vpm}{'avg_bytes'}=$temp{$vpm}{'bytes'}/$temp{$vpm}{'requests'} || 0 if ($bytes); } - + # count by status / error code $temp{$vpm}{"status"}{$status}++ if $status; @@ -77,9 +77,9 @@ while () { }; sub periodic_write { - # begin transaction + # begin transaction $share->lock(LOCK_EX); - + # get data (may be updated by other loggers too) my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run @@ -95,7 +95,7 @@ sub periodic_write { $old{$vpm}{'avg_bytes'}=sprintf("%d",($old{$vpm}{'avg_bytes'}+$temp{$vpm}{'avg_bytes'})/2); # reset local counters - foreach my $check qw(requests bytes time cml_time max_bytes avg_bytes max_time avg_time) { + foreach my $check (qw(requests bytes time cml_time max_bytes avg_bytes max_time avg_time)) { $temp{$vpm}{$check}=0; } diff --git a/plugins/apache/apache_vhosts/apache_vhosts b/plugins/apache/apache_vhosts/apache_vhosts old mode 100644 new mode 100755 index 74150dce..a13141ca --- a/plugins/apache/apache_vhosts/apache_vhosts +++ b/plugins/apache/apache_vhosts/apache_vhosts @@ -18,13 +18,13 @@ This plugin requires data from apache. You can get at the data in two ways: - slightly less performant, but easier to apply to existing installations - If you want response time stats, you have to log them in apache: - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined-time + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined-time - Configure the log parser to match your installation regarding naming and log folders - -You can use both solutions simultaneously, the data will be merged. + +You can use both solutions simultaneously, the data will be merged. Be aware that a apache log CustomLog directive in the master config will only log those vhosts that have no directive of their own. - + Install plugin conf (after [apache_*]) [apache_vhosts] @@ -36,14 +36,14 @@ env.checks requests bytes time # subgraphs - create multigraph subgraphs (watch your graphing performance...), default 0 # checks - enable stats on bytes and response times per request, you have to log these in apache -A word on performance: +A word on performance: Requests/sec should not be much of a problem. Pipelogger and Logparser should not have man performance problems, as the apply one regex per line and add some stats. -Stats are saved every n seconds (default: 7) to shared mem in serialized format. That should be ok on the most loaded servers (unless you watch cache logs). -I would estimate that > 10k log lines/sec could start becoming a problem, you might have to start tuning there or use a dedicated system. +Stats are saved every n seconds (default: 7) to shared mem in serialized format. 
That should be ok on the most loaded servers (unless you watch cache logs). +I would estimate that > 10k log lines/sec could start becoming a problem, you might have to start tuning there or use a dedicated system. You might think about splitting the logs over multiple Logparser scripts to parallelize and merge in larger intervals. -Graphing is another matter, the more vhosts you have. -With subgraphs off, you do 3 main graphs * 4 timescales (day, week, month, year). +Graphing is another matter, the more vhosts you have. +With subgraphs off, you do 3 main graphs * 4 timescales (day, week, month, year). With subgraphs on, you get 2 checks * (1 + 6 * #vhosts) + 1 check * (1 + #vhosts * #statuscodes * 4) With hundreds of vhosts that becomes a problem, as munin-update and munin-html do not scale well. @@ -152,7 +152,7 @@ ${site}_${graph}_$check.type GAUGE END } # end graph } # end sites - } # end subgraph + } # end subgraph } # end checks @@ -173,7 +173,7 @@ graph_period minute graph_order $order END foreach my $site (keys %data) { - + print <new (timeout => 30); my @badports; - + foreach my $port (@PORTS) { my $url = sprintf $URL, $port; my $response = $ua->request (HTTP::Request->new('GET', $url)); push @badports, $port unless $response->is_success; } - + if (@badports) { print "no (no mod_watch exists on ports @badports)\n"; - exit 1; + exit 0; } else { print "yes\n"; exit 0; @@ -76,7 +76,7 @@ foreach my $port (@PORTS) { my ($server, undef, $ifInOctets, $ifOutOctets, $ifRequests, $ifDocuments) = split (/\s/, $string, 6); push @servers, $server unless $server eq "SERVER"; - push @data, "$server $ifInOctets $ifOutOctets $ifRequests $ifDocuments" + push @data, "$server $ifInOctets $ifOutOctets $ifRequests $ifDocuments" unless $server eq "SERVER"; } } diff --git a/plugins/apache/page_load b/plugins/apache/page_load old mode 100644 new mode 100755 index cc09a885..8c952c68 --- a/plugins/apache/page_load +++ b/plugins/apache/page_load @@ -22,38 +22,38 @@ # # apache2.conf # LogFormat "%h %l %u %t \"%r\" %>s %O %b %D \"%{Referer}i\" \"%{User-Agent}i\"" custom -# +# # According to : http://httpd.apache.org/docs/2.2/mod/mod_log_config.html # %D The time taken to serve the request, in microseconds. # In our case %D -> 9 -###################################################################################################### +###################################################################################################### # GLOBALS LOGFILE="/var/log/apache2/access.log" BUFFER_SIZE=500 ###################################################################################################### - + do_ () { command="tail -n $BUFFER_SIZE $LOGFILE | awk '{sum=sum+\$9} END {print \"exec_time.value \"(sum/$BUFFER_SIZE)/1000000}'" eval $command exit 0 } - + do_config () { echo "graph_title Average page execution time" echo "graph_vlabel Seconds" echo "graph_category webserver" echo "graph_args --base 1000 -l 0" echo "graph_info Average page execution time" - + echo "exec_time.label Execution time" echo "exec_time.type GAUGE" } - + case $1 in config|'') eval do_$1 esac - + exit $? diff --git a/plugins/approx/approx b/plugins/approx/approx deleted file mode 100755 index f2d4c253..00000000 --- a/plugins/approx/approx +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python -# -# vim:syntax=python -# -# Plugin to monitor the amount of packages in an approx cache. 
-# -# Usage: place in /etc/munin/plugins/ (or link it there using ln -s) -# -# Parameters understood: -# -# config (required) -# autoconf (optional - used by munin-config) -# -# Magic markers - optional - used by installation scripts and -# munin-config: -# -#%# family=manual -#%# capabilities=autoconf -# -# Now for the real work... - -from sys import argv, exit -from os.path import walk, exists, isfile, join - -def get_file_types(): - """Returns an array of filetype => count.""" - - out = {} - - def visitor(arg, dirname, names): - for file in names: - if not isfile(join(dirname, file)): - continue - ext = file.split(".")[-1] - - out[ext] = out.get(ext, 0) + 1 - - walk('/var/cache/approx/', visitor, None) - - return out - - -# Autoconfiguration -if len(argv) > 1: - - if argv[1] == "autoconf": - # Test if we can find a approx cache - if exists('/var/cache/approx'): - print "yes" - else: - print "no ('/var/cacne/approx' not found)" - exit(1) - exit() - - elif argv[1] == "config": - print "graph_title Approx cache"; - print "graph yes"; - #print "graph_category Other"; - #print "graph_total Total"; - print "graph_info Statistics from the Approx cache."; - #print "debs.label DEBs"; - #print "pending.warning 0:0"; - #print "hold.label hold"; - for type in get_file_types().keys(): - print "%s.label %s" % (type.lower(), type) - exit() - -for type, count in get_file_types().iteritems(): - print "%s.value %d" % (type.lower(), count) - -exit() diff --git a/plugins/apt/approx b/plugins/apt/approx new file mode 100755 index 00000000..5a133a7e --- /dev/null +++ b/plugins/apt/approx @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# +# vim:syntax=python +# +# Plugin to monitor the amount of packages in an approx cache. +# +# Usage: place in /etc/munin/plugins/ (or link it there using ln -s) +# +# Parameters understood: +# +# config (required) +# autoconf (optional - used by munin-config) +# +# Magic markers - optional - used by installation scripts and +# munin-config: +# +# #%# family=manual +# #%# capabilities=autoconf +# +# Now for the real work... + +from os.path import walk, exists, isfile, join +from sys import argv, exit + + +def get_file_types(): + """Returns an array of filetype => count.""" + out = {} + + def visitor(arg, dirname, names): + for filename in names: + if not isfile(join(dirname, filename)): + continue + ext = filename.split(".")[-1].lower() + out[ext] = out.get(ext, 0) + 1 + + walk('/var/cache/approx/', visitor, None) + return out + + +if len(argv) > 1: + + # Autoconfiguration + if argv[1] == "autoconf": + # Test if we can find a approx cache + if exists('/var/cache/approx'): + print("yes") + else: + print("no ('/var/cache/approx' not found)") + exit() + + elif argv[1] == "config": + print("graph_title Approx cache") + print("graph yes") + print("graph_category loadbalancer") + print("graph_info Statistics from the Approx cache.") + for filetype in get_file_types().keys(): + print("%s.label %s" % (filetype.lower(), filetype)) + exit() + +for filetype, count in get_file_types().iteritems(): + print("%s.value %d" % (filetype.lower(), count)) + +exit() diff --git a/plugins/apt/deb_packages/README.md b/plugins/apt/deb_packages/README.md index 36fe2c32..781df2c5 100644 --- a/plugins/apt/deb_packages/README.md +++ b/plugins/apt/deb_packages/README.md @@ -24,20 +24,20 @@ This plugin has checked on Debian - Wheezy and squeeze. If you want to use it on older installations, tell me whether it works or which errors you had. It should run past python-apt 0.7 and python 2.5. 
-check out this git repository from - +check out this git repository from + aptitude install python-apt git clone git://github.com/munin-monitoring/contrib.git cd contrib/plugins/apt/deb_packages sudo cp deb_packages.py /etc/munin/plugins/deb_packages - sudo cp deb_packages.munin-conf /etc/munin/plugin-conf.d/deb_packages + sudo cp deb_packages.munin.conf /etc/munin/plugin-conf.d/deb_packages Verify the installation by sudo munin-run deb_packages ### Configuration -If you copied deb_packages.munin-conf to plugin-conf.d you have a starting point. +If you copied deb_packages.munin.conf to plugin-conf.d you have a starting point. A typical configuration looks like this [deb_packages] @@ -51,7 +51,7 @@ A typical configuration looks like this # Age in seconds an $CACHE_FILE can be. If it is older, the script updates # default if not set is 3540 (one hour) # at the moment this is not used, the plugin always runs (if munin calls it) - # + # env.CACHE_FILE_MAX_AGE 3540 # All these numbers are only for sorting, so you can use env.graph01_sort_by_0 diff --git a/plugins/apt/deb_packages/deb_packages.munin-conf b/plugins/apt/deb_packages/deb_packages.munin.conf similarity index 100% rename from plugins/apt/deb_packages/deb_packages.munin-conf rename to plugins/apt/deb_packages/deb_packages.munin.conf diff --git a/plugins/apt/deb_packages/deb_packages.py b/plugins/apt/deb_packages/deb_packages.py index 371e4d10..f2c21eb6 100755 --- a/plugins/apt/deb_packages/deb_packages.py +++ b/plugins/apt/deb_packages/deb_packages.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -""" +""" A munin plugin that prints archive and their upgradable packets TODO: make it usable and readable as commandline tool @@ -13,18 +13,18 @@ TODO: separate into 2 graphs sorting a packet to the newest archive (WONTFIX unless someone asks for) -TODO: +TODO: • addinge alternative names for archives "stable -> squeeze" -TODO: add gray as +TODO: add gray as foo.colour 000000 to 'now', '', '', '', '', 'Debian dpkg status file' TODO: update only if system was updated (aptitutde update has been run) • check modification date of /var/cache/apt/pkgcache.bin • cache file must not be older than mod_date of pkgcache.bin + X -TODO: shorten ext_info with getShortestConfigOfOptions -TODO: check whether cachefile matches the config +TODO: shorten ext_info with getShortestConfigOfOptions +TODO: check whether cachefile matches the config • i have no clever idea to do this without 100 lines of code -BUG: If a package will be upgraded, and brings in new dependencies, +BUG: If a package will be upgraded, and brings in new dependencies, these new deps will not be counted. WONTFIX """ import sys @@ -32,7 +32,7 @@ import argparse import apt_pkg from apt.progress.base import OpProgress from time import time, strftime -import os +import os import StringIO import string import re @@ -41,7 +41,7 @@ from types import StringTypes, TupleType, DictType, ListType, BooleanType class EnvironmentConfigBroken(Exception): pass -# print environmental things +# print environmental things # for k,v in os.environ.iteritems(): print >> sys.stderr, "%r : %r" % (k,v) def getEnv(name, default=None, cast=None): @@ -68,14 +68,14 @@ MAX_LIST_SIZE_EXT_INFO = getEnv('MAX_LIST_SIZE_EXT_INFO', default=50, cast=int) STATE_DIR = getEnv('MUNIN_PLUGSTATE', default='.') CACHE_FILE = os.path.join(STATE_DIR, "deb_packages.state") -""" +""" There is no need to execute this script every 5 minutes. 
The Results are put to this file, next munin-run can read from it CACHE_FILE is usually /var/lib/munin/plugin-state/debian_packages.state """ CACHE_FILE_MAX_AGE = getEnv('CACHE_FILE_MAX_AGE', default=3540, cast=int) -""" +""" Age in seconds an $CACHE_FILE can be. If it is older, the script updates """ @@ -84,7 +84,7 @@ def Property(func): class Apt(object): """ - lazy helperclass i need in this statisticprogram, which have alle the apt_pkg stuff + lazy helperclass i need in this statisticprogram, which has all the apt_pkg stuff """ def __init__(self): @@ -103,9 +103,9 @@ class Apt(object): doc = "apt_pkg.Cache instance, lazy instantiated" def fget(self): class NullProgress(OpProgress): - """ used for do not giving any progress info, - while doing apt things used, cause documented - use of None as OpProgress did not worked in + """ used for do not giving any progress info, + while doing apt things used, cause documented + use of None as OpProgress did not worked in python-apt 0.7 """ def __init__(self): @@ -119,8 +119,8 @@ class Apt(object): def update(*args,**kwords): pass - if self._cache is None: - self._cache = apt_pkg.Cache(NullProgress()) + if self._cache is None: + self._cache = apt_pkg.Cache(NullProgress()) return self._cache return locals() @@ -129,7 +129,7 @@ class Apt(object): doc = "apt_pkg.DepCache object" def fget(self): - if self._depcache is None: + if self._depcache is None: self._depcache = apt_pkg.DepCache(self.cache) return self._depcache @@ -180,7 +180,7 @@ apt = Apt() apt.installedPackages apt.upgradablePackages - initialisation is lazy + initialisation is lazy """ def weightOfPackageFile(detail_tuple, option_tuple): @@ -214,7 +214,7 @@ def Tree(): class TreeTwig(defaultdict): def __init__(self, defaultFactory): - super(TreeTwig, self).__init__(defaultFactory) + super(TreeTwig, self).__init__(defaultFactory) def printAsTree(self, indent=0): for k, tree in self.iteritems(): @@ -245,16 +245,16 @@ class TreeTwig(defaultdict): def getShortestConfigOfOptions(optionList = ['label', 'archive', 'site']): - """ + """ tries to find the order to print a tree of the optionList - with the local repositories with the shortest line + with the local repositories with the shortest line possible options are: 'component' 'label' 'site' 'archive' - 'origin' - 'architecture' + 'origin' + 'architecture' Architecture values are usually the same and can be ignored. tells you which representation of a tree as line is shortest. @@ -262,19 +262,19 @@ def getShortestConfigOfOptions(optionList = ['label', 'archive', 'site']): to write the shortest readable output. 
""" l = optionList # just because l is much shorter - + # creating possible iterations fieldCount = len(optionList) if fieldCount == 1: selection = l elif fieldCount == 2: - selection = [(x,y) - for x in l + selection = [(x,y) + for x in l for y in l if x!=y ] elif fieldCount == 3: - selection = [(x,y,z) - for x in l - for y in l if x!=y + selection = [(x,y,z) + for x in l + for y in l if x!=y for z in l if z!=y and z!=x] else: raise Exception("NotImplemented for size %s" % fieldCount) @@ -289,7 +289,7 @@ def getShortestConfigOfOptions(optionList = ['label', 'archive', 'site']): r = min( d.items(), key=lambda x: x[1] ) return list(r[0]), r[1] - + def getOptionsTree(cache, keys=None): """ t = getOptionsTree(cache, ['archive', 'site', 'label']) @@ -322,16 +322,16 @@ def createKey(key, file): """ if type(key) in StringTypes: return file.__getattribute__(key) - elif type(key) in (TupleType, ListType): + elif type(key) in (TupleType, ListType): nKey = tuple() for pKey in key: nKey = nKey.__add__((file.__getattribute__(pKey),)) return nKey else: - raise Exception("Not implemented for keytype %s" % type(key)) + raise Exception("Not implemented for keytype %s" % type(key)) def getOptionsTree2(cache, primary=None, secondary=None): - """ + """ primary muss ein iterable oder StringType sein secondary muss iterable oder StringType sein t1 = getOptionsTree2(apt.cache, 'origin', ['site', 'archive']) @@ -369,24 +369,24 @@ def getOptionsTree2(cache, primary=None, secondary=None): dKey = file.__getattribute__(sKey) d = d[dKey] return t - + #def getAttributeSet(iterable, attribute): # return set(f.__getattribute__(attribute) for f in iterable) # #def getOrigins(cache): -# return getAttributeSet(cache.file_list, 'origin') +# return getAttributeSet(cache.file_list, 'origin') # #def getArchives(cache): -# return getAttributeSet(cache.file_list, 'archive') +# return getAttributeSet(cache.file_list, 'archive') # #def getComponents(cache): -# return getAttributeSet(cache.file_list, 'component') +# return getAttributeSet(cache.file_list, 'component') # #def getLabels(cache): -# return getAttributeSet(cache.file_list, 'label') +# return getAttributeSet(cache.file_list, 'label') # #def getSites(cache): -# return getAttributeSet(cache.file_list, 'site') +# return getAttributeSet(cache.file_list, 'site') # class PackageStat(defaultdict): @@ -397,16 +397,16 @@ class PackageStat(defaultdict): with some abilities to print output munin likes """ - sortDict = { 'label': defaultdict( lambda : 20, - {'Debian': 90, + sortDict = { 'label': defaultdict( lambda : 20, + {'Debian': 90, '' : 1, 'Debian Security' : 90, 'Debian Backports': 90}), 'archive': defaultdict( lambda : 5, - { 'now': 0, + { 'now': 0, 'experimental': 10, - 'unstable': 50, - 'sid': 50, + 'unstable': 50, + 'sid': 50, 'testing': 70, 'wheezy': 70, 'squeeze-backports': 80, @@ -426,9 +426,9 @@ class PackageStat(defaultdict): } """ Values to sort options (label, archive, origin ...) - (0..99) is allowed. + (0..99) is allowed. 
(this is needed for other graphs to calc aggregated weights) - higher is more older and more official or better + higher is more older and more official or better """ dpkgStatusValue = { 'site': '', 'origin': '', 'label': '', 'component': '', 'archive': 'now' } @@ -443,7 +443,7 @@ class PackageStat(defaultdict): 'component' : 10**2, } """ - Dict that stores multipliers + Dict that stores multipliers to compile a sorting value for each archivefile """ @@ -483,7 +483,7 @@ class PackageStat(defaultdict): def addPackage(self, sourceFile, package): if self.packetHandler.decider(package): self.packetHandler.adder(package, self) - + @classmethod def configD(cls, key, value): i = { 'rrdName': cls.generate_rrd_name_from(key), @@ -514,8 +514,8 @@ class PackageStat(defaultdict): print "{rrdName}.draw AREASTACK".format(**i) def optionIsDpkgStatus(self, details, options=None): - """ - give it details and options and it tells you whether the datails looks like they come from + """ + give it details and options and it tells you whether the datails looks like they come from a 'Debian dpkg status file'. """ # setting defaults @@ -530,7 +530,7 @@ class PackageStat(defaultdict): return isNow def printValues(self): - print "\nmultigraph packages_{option}_{type}".format(option=self.generate_rrd_name_from(self.option), + print "\nmultigraph packages_{option}_{type}".format(option=self.generate_rrd_name_from(self.option), type=self.packetHandler.type) for options, item in self.options_sorted: if not self.packetHandler.includeNow and self.optionIsDpkgStatus(details=options): @@ -555,7 +555,7 @@ packetHandlerD = {} class PacketHandler(object): """ - Baseclass, that represents the Interface which is used + Baseclass, that represents the Interface which is used """ type = None @@ -575,7 +575,7 @@ class PacketHandler(object): def adder(self, package, packageStat, *args, **kwords): """ - take the package and add it tho the packageStat dictionary in defined way + take the package and add it to the packageStat dictionary in defined way """ pass @@ -591,7 +591,7 @@ class PacketHandler(object): return weightOfPackageFile(details, options) class PacketHandlerUpgradable(PacketHandler): - + type='upgradable' includeNow = False extInfoItemString = " {i[0].name} <{i[1]} -> {i[2]}>" @@ -628,7 +628,7 @@ class PacketHandlerInstalled(PacketHandler): # this item (as i) is used for input in extInfoItemString item = package packageStat[keys].append(item) - + # registering PackageHandler for Usage packetHandlerD[PacketHandlerInstalled.type] = PacketHandlerInstalled @@ -637,7 +637,7 @@ class Munin(object): def __init__(self, commandLineArgs=None): self.commandLineArgs = commandLineArgs self.argParser = self._argParser() - self.executionMatrix = { + self.executionMatrix = { 'config': self.config, 'run' : self.run, 'autoconf' : self.autoconf, @@ -685,7 +685,7 @@ class Munin(object): else: raise Exception('DPKG-statusfile %r not found, really strange!!!'%dpkgStatusFile) newestFileTimestamp = max(timeL) - age = newestFileTimestamp - cacheMTime + age = newestFileTimestamp - cacheMTime if age > 0: return True else: @@ -709,7 +709,7 @@ class Munin(object): # cacheNeedUpdate = True if self._cacheIsOutdated() or self.args.nocache: - # save stdout + # save stdout stdoutDef = sys.stdout try: out = StringIO.StringIO() @@ -765,7 +765,7 @@ class Munin(object): def _argParser(self): parser = argparse.ArgumentParser(description="Show some statistics "\ "about debian packages installed on system by archive", - ) + ) parser.set_defaults(command='run', 
debug=True, nocache=True) parser.add_argument('--nocache', '-n', default=False, action='store_true', @@ -775,7 +775,7 @@ class Munin(object): run ........ munin run (writes values) autoconf ... writes 'yes' """ - parser.add_argument('command', nargs='?', + parser.add_argument('command', nargs='?', choices=['config', 'run', 'autoconf', 'drun'], help='mode munin wants to use. "run" is default' + helpCommand) return parser @@ -783,10 +783,10 @@ class Munin(object): def _envParser(self): """ reads environVars from [deb_packages] and generate - a list of dicts, each dict holds a set of settings made in + a list of dicts, each dict holds a set of settings made in munin config. - [ - { 'type' = 'installed', + [ + { 'type' = 'installed', 'sort_by' = ['label', 'archive'], 'show_ext' = ['origin', 'site'], }, @@ -816,7 +816,7 @@ class Munin(object): configPart['show_ext'][m.group('optNumber')] = os.getenv(var) else: print >> sys.stderr, "configuration option %r was ignored" % (var) - # we have now dicts for 'sort_by' and 'show_ext' keys + # we have now dicts for 'sort_by' and 'show_ext' keys # changing them to lists for graphConfig in config.itervalues(): graphConfig['sort_by'] = [val for key, val in sorted(graphConfig['sort_by'].items())] @@ -839,13 +839,13 @@ class Munin(object): "Graph must be sorted by anything" raise EnvironmentConfigBroken("Environment Config broken") # check for valid options for sort_by - unusableOptions = set(graph['sort_by']) - PackageStat.viewSet - if unusableOptions: + unusableOptions = set(graph['sort_by']) - PackageStat.viewSet + if unusableOptions: print >> sys.stderr, \ "%r are not valid options for 'sort_by'" % (unusableOptions) raise EnvironmentConfigBroken("Environment Config broken") # check for valid options for sort_by - unusableOptions = set(graph['show_ext']) - PackageStat.viewSet + unusableOptions = set(graph['show_ext']) - PackageStat.viewSet if unusableOptions: print >> sys.stderr, \ "%r are not valid options for 'show_ext'" % (x) @@ -868,7 +868,7 @@ deb_packages - plugin to monitor update resources and pending packages on Debian This plugin has checked on Debian - Wheezy and squeeze. If you want to use it on older installations, tell me whether it works or which errors you had. It -shoud run past python-apt 0.7 and python 2.5. +should run past python-apt 0.7 and python 2.5. =head1 DESCRIPTION @@ -895,7 +895,7 @@ check out this git repository from git clone git://github.com/munin-monitoring/contrib.git cd contrib/plugins/apt/deb_packages sudo cp deb_packages.py /etc/munin/plugins/deb_packages - sudo cp deb_packages.munin-conf /etc/munin/plugin-conf.d/deb_packages + sudo cp deb_packages.munin.conf /etc/munin/plugin-conf.d/deb_packages =back @@ -910,7 +910,7 @@ Verify the installation by =head1 CONFIGURATION -If you copied deb_packages.munin-conf to plugin-conf.d you have a starting point. +If you copied deb_packages.munin.conf to plugin-conf.d you have a starting point. 
A typical configuration looks like this diff --git a/plugins/apt/deb_packages/example/packages_label_archive_upgradable-week.png b/plugins/apt/deb_packages/example-graphs/deb_packages.py-1.png similarity index 100% rename from plugins/apt/deb_packages/example/packages_label_archive_upgradable-week.png rename to plugins/apt/deb_packages/example-graphs/deb_packages.py-1.png diff --git a/plugins/arangodb/arangodb_ b/plugins/arangodb/arangodb_ index a90863af..4ee21e92 100755 --- a/plugins/arangodb/arangodb_ +++ b/plugins/arangodb/arangodb_ @@ -1,7 +1,7 @@ #!/usr/bin/python """ -Plugin to monitor ArangoDB servers. It works with the new server statistics +Plugin to monitor ArangoDB servers. It works with the new server statistics interface of ArangoDB 1.3. Not every value seems senseful, but there are nice graphs generated... @@ -30,19 +30,19 @@ Usage: Links possible: arangodb_conn HTTP client connections arangodb_time_total Total request/queue/connection time - arangodb_bytes_total Total sent/received bytes + arangodb_bytes_total Total sent/received bytes Configuration: - No configuration required. Just enable the admin interface of ArangoDB. -Thanks to the authors of other Python munin plugins. I've used some of -them as inspiring example. +Thanks to the authors of other Python munin plugins. I've used some of +them as inspiring example. Possible todos: - support of munin-like configuration parameters - add more statistics - + """ from os.path import basename @@ -57,7 +57,7 @@ except ImportError: def getServerStatus(group): raw = urllib2.urlopen( "http://127.0.0.1:8529/_admin/statistics" ).read() - + return json.loads( raw )[group] def doData(plugin_name): @@ -66,26 +66,26 @@ def doData(plugin_name): elif plugin_name== 'arangodb_time_total': data = getServerStatus('client') - timeTotal = data['totalTime']['sum'] - timeConnection = data['connectionTime']['sum'] - timeRequest = data['requestTime']['sum'] - timeQueue = data['queueTime']['sum'] - + timeTotal = data['totalTime']['sum'] + timeConnection = data['connectionTime']['sum'] + timeRequest = data['requestTime']['sum'] + timeQueue = data['queueTime']['sum'] + print "total.value " + str(int(round(timeTotal))) print "connection.value " + str(int(round(timeConnection))) print "request.value " + str(int(round(timeRequest))) print "queue.value " + str(int(round(timeQueue))) - + elif plugin_name== 'arangodb_bytes_total': data = getServerStatus('client') bytesReceived = data['bytesReceived']['sum'] bytesSent = data['bytesSent']['sum'] print "received.value " + str(int(round(bytesReceived))) print "sent.value " + str(int(round(bytesSent))) - + else: pass - + def doConfig(plugin_name): if plugin_name == 'arangodb_conn': print "graph_title ArangoDB current connections" @@ -103,7 +103,7 @@ def doConfig(plugin_name): print "connection.label connection" print "request.label request" print "queue.label queue" - + elif plugin_name == 'arangodb_bytes_total': print "graph_title ArangoDB total bytes" print "graph_args --base 1024" @@ -125,7 +125,7 @@ def doConfig(plugin_name): else: pass - + plugin_name = basename(sys.argv[0]) diff --git a/plugins/aris/aris_players b/plugins/aris/aris_players index b783f92c..95e95cc2 100755 --- a/plugins/aris/aris_players +++ b/plugins/aris/aris_players @@ -18,7 +18,7 @@ # Parameters: # # config (required) -# +# # #%# family=manual diff --git a/plugins/arp/arp b/plugins/arp/arp deleted file mode 100755 index 896b38bb..00000000 --- a/plugins/arp/arp +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# -# Plugin to monitor total ARP 
entries -# -# Parameters understood: -# -# config (required) -# autoconf (optional) -# -# Made by Sven Hartge (sven AT svenhartge DOT de) -# - -#%# family=contrib -#%# capabilities=autoconf - -if [ "$1" = "autoconf" ]; then - # Search for arp - which arp >/dev/null 2>/dev/null || (echo "no (can't find arp binary)" && exit 1) - - # ...or success - echo yes - exit 0 -fi - - - -if [ "$1" = "config" ]; then - echo 'graph_title ARP entries' - echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel Entries' - echo 'graph_category network' - echo 'graph_scale no' - echo 'graph_info This graph shows the number of ARP entries registered by the system.' - echo 'entries.label ARP entries' - echo 'entries.draw LINE2' - echo 'entries.type GAUGE' - echo 'entries.info Number of ARP entries' - exit 0 -fi - -arp -an | awk 'BEGIN { regex="";} { if (!match($4,regex)) { a[$4] }} END{for(i in a){n++};print "entries.value " n}' diff --git a/plugins/arp/arp_ b/plugins/arp/arp_ index f85865b4..979d9a57 100755 --- a/plugins/arp/arp_ +++ b/plugins/arp/arp_ @@ -19,13 +19,11 @@ case "$1" in autoconf) # Search for ip - which ip >/dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "no (can't find ip binary)" - exit 1 + if which ip >/dev/null; then + echo 'yes' + else + echo "no (missing 'ip' executable)" fi - # ...or success - echo 'yes' exit 0 ;; suggest) diff --git a/plugins/arp/arp_bsd_ b/plugins/arp/arp_bsd_ old mode 100644 new mode 100755 diff --git a/plugins/assp/assp-envelope-recipient-statistics b/plugins/assp/assp-envelope-recipient-statistics index 98366914..d09d90e6 100755 --- a/plugins/assp/assp-envelope-recipient-statistics +++ b/plugins/assp/assp-envelope-recipient-statistics @@ -30,7 +30,7 @@ my $version = "1.0"; # UA Version my $agentname = "$pluginname Munin Plugin V$version"; # UA String my $url = "http://localhost:55553/"; # (defaults to localhost) my $response = 0; # the server output -my @content = (); # the content we're retrive from $response +my @content = (); # the content we retrieve from $response my %index = ( # for Version 2 'from' => 40, # <-- index frame from ( a tweak for other ASSP Versions ) 'to' => 63 # <-- index frame to ( "" ) @@ -66,20 +66,20 @@ my @muninlabel = ( ); # ============= SANITY CHECKS ================ -unless( defined(@ARGV) ){ +unless( @ARGV ){ $ARGV[0] = ""; } # =============== THE GET ==================== if( $ARGV[0] eq "" ){ my $agent = LWP::UserAgent->new(); - $agent->agent("$agentname"); + $agent->agent("$agentname"); $response = $agent->get( $url ); &response_error() unless $response->is_success; @content = split( /\n/, $response->content ); my $line = ""; - my $count = $index{from}; + my $count = $index{from}; my $label; my( $key, $value, $last ); while( 1 ){ diff --git a/plugins/assp/assp-general-runtime-information b/plugins/assp/assp-general-runtime-information index 931b0ed9..889f86c8 100755 --- a/plugins/assp/assp-general-runtime-information +++ b/plugins/assp/assp-general-runtime-information @@ -12,7 +12,7 @@ use strict; use warnings; -use File::Basename; +use File::Basename; use LWP; use Mail::Sendmail; @@ -30,7 +30,7 @@ my $version = "1.0"; # UA Version my $agentname = "$pluginname Munin Plugin V$version"; # UA String my $url = "http://localhost:55553/"; # (defaults to localhost) my $response = 0; # the server output -my @content = (); # the content we're retrive from $response +my @content = (); # the content we retrieve from $response my %index = ( # for Version 2 'from' => 2, # <-- index frame from ( a tweak for other ASSP Versions ) 'to' => 4 # <-- index 
frame to ( "" ) @@ -46,18 +46,18 @@ my @muninlabel = ( # General Runtime Information ); # ============= SANITY CHECKS ================ -unless( defined(@ARGV) ){ +unless( @ARGV ){ $ARGV[0] = ""; } # =============== THE GET ==================== if( $ARGV[0] eq "" ){ my $agent = LWP::UserAgent->new(); - $agent->agent("$agentname"); + $agent->agent("$agentname"); $response = $agent->get( $url ); &response_error() unless $response->is_success; @content = split( /\n/, $response->content ); - + my $line = ""; my $count = $index{from}; my $label; diff --git a/plugins/assp/assp-message-statistics b/plugins/assp/assp-message-statistics index 0657dbd0..dd13b6c7 100755 --- a/plugins/assp/assp-message-statistics +++ b/plugins/assp/assp-message-statistics @@ -17,23 +17,23 @@ use LWP; use Mail::Sendmail; # -------------------------- DEBUG VARS --------------------------------- -my $DEBUG = 0; # for debugging purpose -my $EMAILDEBUG = 0; # for email debugging -my $pluginname = &basename( "$0" ); # get the basename of the plugin -my @to = qw( webmaster@bguel.info ); # the list of admins receivced messages on an -my $from = "$pluginname-at-host\@guel.info"; # the host from where it comes -my $muninnodename = "mail.guel.info"; # the Node from where it comes -my $smtp = "mail.guel.info"; # the smtp relay to send the mail +my $DEBUG = 0; # for debugging purpose +my $EMAILDEBUG = 0; # for email debugging +my $pluginname = &basename( "$0" ); # get the basename of the plugin +my @to = qw( webmaster@bguel.info ); # the list of admins receivced messages on an +my $from = "$pluginname-at-host\@guel.info"; # the host from where it comes +my $muninnodename = "mail.guel.info"; # the Node from where it comes +my $smtp = "mail.guel.info"; # the smtp relay to send the mail # ------------------------- GLOBAL VARS --------------------------------- -my $version = "1.0"; # UA Version -my $agentname = "$pluginname Munin Plugin V$version"; # UA String -my $url = "http://localhost:55553/"; # (defaults to localhost) -my $response = 0; # the server output -my @content = (); # the content we're retrive from $response +my $version = "1.0"; # UA Version +my $agentname = "$pluginname Munin Plugin V$version"; # UA String +my $url = "http://localhost:55553/"; # (defaults to localhost) +my $response = 0; # the server output +my @content = (); # the content we retrieve from $response my %index = ( # for Version 2 - 'from' => 66, # <-- index frame from ( a tweak for other ASSP Versions ) - 'to' => 100 # <-- index frame to ( "" ) + 'from' => 66, # <-- index frame from ( a tweak for other ASSP Versions ) + 'to' => 100 # <-- index frame to ( "" ) ); @@ -78,20 +78,20 @@ my @muninlabel = ( ); # ============= SANITY CHECKS ================ -unless( defined(@ARGV) ){ +unless( @ARGV ){ $ARGV[0] = ""; } # =============== THE GET ==================== if( $ARGV[0] eq "" ){ my $agent = LWP::UserAgent->new(); - $agent->agent("$agentname"); + $agent->agent("$agentname"); $response = $agent->get( $url ); &response_error() unless $response->is_success; @content = split( /\n/, $response->content ); my $line = ""; - my $count = $index{from}; + my $count = $index{from}; my $label; my( $key, $value, $last ); while( 1 ){ diff --git a/plugins/assp/assp-smtp-connection-statistics b/plugins/assp/assp-smtp-connection-statistics index 4caabb9f..266ceaa0 100755 --- a/plugins/assp/assp-smtp-connection-statistics +++ b/plugins/assp/assp-smtp-connection-statistics @@ -30,7 +30,7 @@ my $version = "1.0"; # UA Version my $agentname = "$pluginname Munin Plugin V$version"; 
# UA String my $url = "http://localhost:55553/"; # (defaults to localhost) my $response = 0; # the server output -my @content = (); # the content we're retrive from $response +my @content = (); # the content we retrieve from $response my %index = ( # for Version 2 'from' => 25, # <-- index frame from ( a tweak for other ASSP Versions ) 'to' => 38 # <-- index frame to ( "" ) @@ -56,20 +56,20 @@ my @muninlabel = ( ); # ============= SANITY CHECKS ================ -unless( defined(@ARGV) ){ +unless( @ARGV ){ $ARGV[0] = ""; } # =============== THE GET ==================== if( $ARGV[0] eq "" ){ my $agent = LWP::UserAgent->new(); - $agent->agent("$agentname"); + $agent->agent("$agentname"); $response = $agent->get( $url ); &response_error() unless $response->is_success; @content = split( /\n/, $response->content ); my $line = ""; - my $count = $index{from}; + my $count = $index{from}; my $label; my( $key, $value, $last ); while( 1 ){ diff --git a/plugins/assp/assp-smtp-handler-statistics b/plugins/assp/assp-smtp-handler-statistics index c43570f6..981fec05 100755 --- a/plugins/assp/assp-smtp-handler-statistics +++ b/plugins/assp/assp-smtp-handler-statistics @@ -12,7 +12,7 @@ use strict; use warnings; -use File::Basename; +use File::Basename; use LWP; use Mail::Sendmail; @@ -30,7 +30,7 @@ my $version = "1.0"; # UA Version my $agentname = "$pluginname Munin Plugin V$version"; # UA String my $url = "http://localhost:55553/"; # (defaults to localhost) my $response = 0; # the server output -my @content = (); # the content we're retrive from $response +my @content = (); # the content we retrieve from $response my %index = ( # for Version 2 'from' => 4, # <-- index frame from ( a tweak for other ASSP Versions ) 'to' => 5 # <-- index frame to ( "" ) @@ -46,18 +46,18 @@ my @muninlabel = ( # General Runtime Information ); # ============= SANITY CHECKS ================ -unless( defined(@ARGV) ){ +unless( @ARGV ){ $ARGV[0] = ""; } # =============== THE GET ==================== if( $ARGV[0] eq "" ){ my $agent = LWP::UserAgent->new(); - $agent->agent("$agentname"); + $agent->agent("$agentname"); $response = $agent->get( $url ); &response_error() unless $response->is_success; @content = split( /\n/, $response->content ); - + my $line = ""; my $count = $index{from}; my $label; diff --git a/plugins/asterisk/asterisk b/plugins/asterisk/asterisk index abd87f2c..777a34cd 100755 --- a/plugins/asterisk/asterisk +++ b/plugins/asterisk/asterisk @@ -16,8 +16,11 @@ This plugin will produce multiple graphs showing: - the number of messages in all voicemail boxes (replaces asterisk_voicemail); - - the number of active MeetMe conferences and users connected to them - (replace asterisk_meetme and asterisk_meetmeusers, respectively); + - DEPRECATED: the number of active MeetMe conferences and users connected to + them (replace asterisk_meetme and asterisk_meetmeusers, respectively); + + - the number of active ConfBridge conferences (e.g. non-empty ones) and users + connected to them - the number of active channels for a given codec, for both SIP and IAX2 channels (replaces asterisk_sipchannels and asterisk_codecs). 
@@ -27,13 +30,15 @@ This plugin will produce multiple graphs showing: The following configuration parameters are used by this plugin [asterisk] - env.host - hostname to connect to - env.port - port number to connect to - env.username - username used for authentication - env.secret - secret used for authentication - env.channels - The channel types to look for - env.codecsx - List of codec IDs (hexadecimal values) - env.codecs - List of codecs names, matching codecsx order + env.host - hostname to connect to + env.port - port number to connect to + env.username - username used for authentication + env.secret - secret used for authentication + env.channels - The channel types to look for + env.codecsx - List of codec IDs (hexadecimal values) + env.codecs - List of codecs names, matching codecsx order + env.enable_meetme - Set to 1 to enable graphs for the MeetMe application + env.enable_confbridge - Set to 1 to enable graphs for the ConfBridge application The "username" and "secret" parameters are mandatory, and have no defaults. @@ -46,6 +51,8 @@ defaults. env.channels Zap IAX2 SIP env.codecsx 0x2 0x4 0x8 env.codecs gsm ulaw alaw + env.enable_meetme 0 + env.enable_confbridge 1 =head2 WILDCARD CONFIGURATION @@ -77,30 +84,34 @@ use strict; use Munin::Plugin; use IO::Socket; +# See the following and its subpages for change history in the AMI protocol: +# https://wiki.asterisk.org/wiki/display/AST/Asterisk+Manager+Interface+%28AMI%29+Changes sub asterisk_command { my ($socket, $command) = @_; my $line, my $reply; $socket->print("Action: command\nCommand: $command\n\n"); - # Response: (Error|Follows|???) + # Response: (Error|Follows|Success) $line = $socket->getline; - if ($line !~ /^Response: Follows\r?\n$/) { - while ( $line = $socket->getline and $line !~ /^\r?\n$/ ) {} + if ($line !~ /^Response: Success\r?\n$/) { + while ( $line = $socket->getline and $line !~ /^\r?\n$/ ) { + print STDERR "COMMAND: Ignoring unwanted line: $line" if $Munin::Plugin::DEBUG; + } return undef; } - # Privilege: Command + # Message: Command output follows $line = $socket->getline; + print STDERR "COMMAND got response: $line" if $Munin::Plugin::DEBUG; # Until we get the --END COMMAND-- marker, it's the command's output. - while ( $line = $socket->getline and $line !~ /^--END COMMAND--\r?\n$/ ) { + while ( $line = $socket->getline and $line =~ /^Output:/ ) { + print STDERR "COMMAND: got response: $line" if $Munin::Plugin::DEBUG; + # Don't keep the "Output: " part of the response + substr($line, 0, 8, ''); $reply .= $line; } - - # And then wait for the empty line that says we're done - while ( $line = $socket->getline and $line !~ /^\r?\n$/ ) {} - return $reply; } @@ -117,10 +128,13 @@ my @CHANNELS = exists $ENV{'channels'} ? split ' ',$ENV{'channels'} : qw(Zap IAX my @CODECS = exists $ENV{'codecs'} ? split ' ',$ENV{'codecs'} : qw(gsm ulaw alaw); my @CODECSX = exists $ENV{'codecsx'} ? split ' ',$ENV{'codecsx'} : qw(0x2 0x4 0x8); +my $meetme_enabled = $ENV{'enable_meetme'} || '0'; +my $confbridge_enabled = $ENV{'enable_confbridge'} || '1'; + my $line, my $error; my $socket = new IO::Socket::INET(PeerAddr => $peeraddr, - PeerPort => $peerport, - Proto => 'tcp') + PeerPort => $peerport, + Proto => 'tcp') or $error = "Could not create socket: $!"; if ( $socket ) { @@ -177,7 +191,8 @@ graph_category voip total.label Total messages END -print < /dev/null if [ $? 
= "0" ]; then echo yes - exit 0 else echo no - exit 1 fi else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/audit/portaudit b/plugins/audit/portaudit index f067e1eb..e4b63ceb 100755 --- a/plugins/audit/portaudit +++ b/plugins/audit/portaudit @@ -7,7 +7,7 @@ # config (required) # autoconf (optional - used by munin-config) # -# Magick markers (optional - used by munin-config and som installation +# Magick markers (optional - used by munin-config and some installation # scripts): #%# family=auto #%# capabilities=autoconf @@ -16,7 +16,7 @@ PORTAUDIT=/usr/local/sbin/portaudit if [ "$1" = "autoconf" ]; then if [ -x $PORTAUDIT ]; then - echo yes + echo yes exit 0 else echo no '(portaudit not found)' diff --git a/plugins/backup/backup.sh b/plugins/backup/backup.sh index 492a9fed..5ee33559 100755 --- a/plugins/backup/backup.sh +++ b/plugins/backup/backup.sh @@ -11,6 +11,7 @@ case $1 in cat <<'EOM' graph_title Number of young files at backup directory graph_vlabel number +graph_category backup autobackup.label number autobackup.critical 1: EOM @@ -18,5 +19,4 @@ EOM esac printf "autobackup.value " -find $BACKUP_DIR -mtime -$LIFETIME | wc -l - +find "$BACKUP_DIR" -mtime "-$LIFETIME" | wc -l diff --git a/plugins/backup/example-graphs/fresh_backups-week.png b/plugins/backup/example-graphs/fresh_backups-week.png new file mode 100644 index 00000000..384474e8 Binary files /dev/null and b/plugins/backup/example-graphs/fresh_backups-week.png differ diff --git a/plugins/backup/fresh-backups b/plugins/backup/fresh-backups new file mode 100755 index 00000000..3d2d7a42 --- /dev/null +++ b/plugins/backup/fresh-backups @@ -0,0 +1,121 @@ +#!/bin/bash -u + +: << =cut + +=head1 NAME + +fresh-backups - Plugin to monitor the freshness of backup files + +=head1 APPLICABLE SYSTEMS + +Any system with some automated backup creating or updating archive files. + +This works well with backup-manager. + +=head1 CONFIGURATION + +The following example checks all tar.bz2 files in /path/to/your/backups/, and +counts all those that are less than 2 days old, and there should be 4 separate +daily archives. + + [fresh-backups] + user root + env.backup_dir /path/to/your/backups/ + env.lifetime 2 + env.archive_pattern *.tar.bz2 + env.backup_number 4 + +This will also set the warning and critical values for this plugin to 2*4 and +4, respectively, meaning that if the number of fresh files goes below those +limits, the relevant notifications will be triggered. + +An example configuration snippet for backup-manager [0] follows. + + export BM_REPOSITORY_ROOT="/path/to/your/backups" + export BM_TARBALL_FILETYPE="tar.bz2" + export BM_TARBALL_DIRECTORIES="/etc /home /srv /data" + +[0] https://github.com/sukria/Backup-Manager + +=head1 AUTHOR + +Copyright (C) 2016,2019 Olivier Mehani + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut + +# Bash is needed for this array to work +COLOUR=(00FF00 24DA00 48B600 6D9100 916D00 B64800 DA2400 FF0000) # green to red + +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x +fi +# Configuration directives, edit before first use. +BACKUP_DIR=${backup_dir:-/data/backup} +ARCHIVE_PATTERN="${archive_pattern:-*.tar.bz2}" +# How old backups should be considered as non-young anymore in [days]. 
+LIFETIME=${lifetime:-2} +# Critical states will be issued when the number of fresh backups archives is below `backup_number`, +# and warnings below `backup_number*lifetime - 1` +CRIT=${backup_number:-1} +# We should have at least LIFETIME-1 complete backups when the new one is in progress +WARN=$((CRIT*(LIFETIME-1))) + +# The situation is critical if there are no young files, the backup is down. +case ${1:-} in + config) + cat << EOF +graph_title Fresh backups +graph_info Number of fresh (<=${LIFETIME}d) backups archives in ${BACKUP_DIR} +graph_args -l 0 +graph_category backup +EOF +for AGE in $(seq "${LIFETIME}" -1 0); do + if [ "${AGE}" = 0 ]; then + echo "age${AGE}.label today" + else + echo "age${AGE}.label older than $((AGE*24))h" + fi + cat << EOF +age${AGE}.draw AREASTACK +age${AGE}.colour ${COLOUR[$AGE]} +age${AGE}.warning +age${AGE}.critical +EOF +done +cat << EOF +freshcount.label ${ARCHIVE_PATTERN} files fresher than ${LIFETIME}d +freshcount.critical ${CRIT}: +freshcount.warning ${WARN}: +freshcount.colour 0080FF +EOF + exit 0;; +esac + +for AGE in $(seq "${LIFETIME}" -1 0); do + FILES=$(find "${BACKUP_DIR}" \ + -name "${ARCHIVE_PATTERN}" \ + -mmin "-$(((AGE+1)*60*24))" \ + -not -mmin "-$(((AGE)*60*24))" \ + ) + COUNT="$(echo "${FILES}" \ + | wc -l)" + echo "age${AGE}.value $((COUNT))" + # shellcheck disable=SC2086 + echo "age${AGE}.extinfo $(echo ${FILES} | sort | sed "s^${BACKUP_DIR}^^g")" +done + +COUNT=$(find "${BACKUP_DIR}" \ + -name "${ARCHIVE_PATTERN}" \ + -mmin "-$(((LIFETIME+1)*60*24))" \ + | wc -l) +# The last count is also our total count +echo "freshcount.value ${COUNT}" +echo "freshcount.extinfo $(du -sh "${BACKUP_DIR}")" diff --git a/plugins/backuppc/backuppc b/plugins/backuppc/backuppc index ea5f3cf8..db74d222 100755 --- a/plugins/backuppc/backuppc +++ b/plugins/backuppc/backuppc @@ -18,9 +18,12 @@ HOSTS=$(cd ${PCDIR} 2>/dev/null && ls -1) . $MUNIN_LIBDIR/plugins/plugin.sh if [ "$1" = "autoconf" ]; then - [ ! 
-z "${HOSTS}" ] && echo "yes" && exit 0 - echo "no" - exit 1 + if [ -n "$HOSTS" ]; then + echo "yes" + else + echo "no" + fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/bacula/bacula_job b/plugins/bacula/bacula_job index 0774712e..65d6e8ff 100755 --- a/plugins/bacula/bacula_job +++ b/plugins/bacula/bacula_job @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # Copyright (C) 2009 Andreas Thienemann # @@ -22,145 +22,149 @@ # # Parameters: # -# config (required) -# autoconf (optional - only used by munin-config) +# config (required) +# autoconf (optional - only used by munin-config) # # Magic markers (optional - only used by munin-config and some # installation scripts): # -#%# family=contrib -#%# capabilities=autoconf +# #%# family=contrib +# #%# capabilities=autoconf import subprocess -import time import sys import re import os + def parse_running_jobs(): - """ Parse the bconsole output once to get the running jobs """ + """ Parse the bconsole output once to get the running jobs """ - bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) - stdout, stderr = bconsole.communicate("status\n1\nstatus\n3\n.") + bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) + stdout, stderr = bconsole.communicate("status\n1\nstatus\n3\n.") - jobs = [] - clients = [] - clientlist = False + jobs = [] + clients = [] + clientlist = False - # Hold the line numbers for devices - dev_line = [] - input = stdout.split("\n") + # Hold the line numbers for devices + input_lines = stdout.split("\n") - for line, i in zip(input, range(0, len(input))): - if line.startswith("Connecting to Director "): - hostname = line.split()[-1].split(":")[0] + for line, i in zip(input_lines, range(0, len(input_lines))): + if line.startswith("Connecting to Director "): + hostname = line.split()[-1].split(":")[0] - if line.endswith(" is running"): - jobs.append(line.split()[2].split(".")[0]) + if line.endswith(" is running"): + jobs.append(line.split()[2].split(".")[0]) - # Parse the clientlist, warning, order of statements is important - if line.startswith("Select Client (File daemon) resource"): - clientlist = False + # Parse the clientlist, warning, order of statements is important + if line.startswith("Select Client (File daemon) resource"): + clientlist = False - if clientlist is True: - client_id, client_name = line.split() - client_clean = re.sub("^[^A-Za-z_]", "_", client_name, 1) - client_clean = re.sub("[^A-Za-z0-9_]", "_", client_clean, 0) - clients.append((client_name, client_clean, client_id[:-1])) + if clientlist is True: + client_id, client_name = line.split() + client_clean = re.sub("^[^A-Za-z_]", "_", client_name, 1) + client_clean = re.sub("[^A-Za-z0-9_]", "_", client_clean, 0) + clients.append((client_name, client_clean, client_id[:-1])) - if line.startswith("The defined Client resources are:"): - clientlist = True + if line.startswith("The defined Client resources are:"): + clientlist = True - return hostname, jobs, clients + return hostname, jobs, clients def parse(clients): - """ Parse the bconsole output """ + """ Parse the bconsole output """ - query_str = "" - for client in clients: - query_str = query_str + "status\n3\n" + client[1] + "\n" - query_str = query_str + "quit" + query_str = "" + for client in clients: + query_str = query_str + "status\n3\n" + client[1] + "\n" + query_str = query_str + "quit" - bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) - stdout, stderr = 
bconsole.communicate(query_str) + bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) + stdout, stderr = bconsole.communicate(query_str) - input = stdout.split("\n") + input_lines = stdout.split("\n") - jobstats = [] + jobstats = [] - for line, pos in zip(input, range(0, len(input))): + for line, pos in zip(input_lines, range(0, len(input_lines))): - # Get the client name - if line.startswith("Connecting to Client "): - # client_name = input[pos].split()[3].split(".")[0] - client_name = line.split()[3] - client_clean = re.sub("^[^A-Za-z_]", "_", client_name, 1) - client_clean = re.sub("[^A-Za-z0-9_]", "_", client_clean, 0) + # Get the client name + if line.startswith("Connecting to Client "): + # client_name = input_lines[pos].split()[3].split(".")[0] + client_name = line.split()[3] + client_clean = re.sub("^[^A-Za-z_]", "_", client_name, 1) + client_clean = re.sub("[^A-Za-z0-9_]", "_", client_clean, 0) - # Get the current bytes - if line.endswith(" is running."): - bytes = long(input[pos+2].split()[1].split("=")[1].replace(",", "")) - jobstats.append([client_name, client_clean, bytes]) + # Get the current bytes + if line.endswith(" is running."): + bytes_count_text = input_lines[pos + 2].split()[1].split("=")[1].replace(",", "") + try: + # python2 + bytes_count = long(bytes_count_text) + except NameError: + # python3 + bytes_count = int(bytes_count_text) + jobstats.append([client_name, client_clean, bytes_count]) - job_dict = {} - for job in jobstats: - job_dict[job[0].split("-")[0]] = job + job_dict = {} + for job in jobstats: + job_dict[job[0].split("-")[0]] = job - return job_dict + return job_dict def print_config(): - hostname, jobs, clients = parse_running_jobs() - print "graph_title Bacula Job throughput" - print "graph_vlabel bytes per ${graph_period}" - print "graph_args --base 1024 -l 0" - print "graph_scale yes" - print "graph_info Bacula Job measurement." 
- print "graph_category backup" - print "graph_order", - for fd in clients: - print fd[1], - print - if os.getenv("report_hostname") is not None and \ - os.getenv("report_hostname").upper() in ["YES", "TRUE", "1", "Y"]: - print "host_name", hostname - for client in clients: - print "%s.label %s" % (client[1], client[0]) - print "%s.type DERIVE" % (client[1]) - print "%s.min 0" % (client[1]) -# print "%s.max %s" % (client[1], str(1024*1024*1024*16)) -# print "%s.cdef up,8,*" (client[1]) - sys.exit(0) + hostname, jobs, clients = parse_running_jobs() + print("graph_title Bacula Job throughput") + print("graph_vlabel bytes per ${graph_period}") + print("graph_args --base 1024 -l 0") + print("graph_scale yes") + print("graph_info Bacula Job measurement.") + print("graph_category backup") + print("graph_order", " ".join(fd[1] for fd in clients)) + print() + if ((os.getenv("report_hostname") is not None) + and (os.getenv("report_hostname").upper() in ["YES", "TRUE", "1", "Y"])): + print("host_name", hostname) + for client in clients: + print("%s.label %s" % (client[1], client[0])) + print("%s.type DERIVE" % (client[1])) + print("%s.min 0" % (client[1])) +# print("%s.max %s" % (client[1], str(1024*1024*1024*16))) +# print("%s.cdef up,8,*" (client[1])) + sys.exit(0) if "config" in sys.argv[1:]: - print_config() + print_config() elif "autoconf" in sys.argv[1:]: - for dir in os.getenv("PATH").split(":"): - for root, dirs, files in os.walk(dir): - if "bconsole" in files: - print "yes" - sys.exit(0) - print "no" - sys.exit(1) + for directory in os.getenv("PATH").split(":"): + for root, dirs, files in os.walk(directory): + if "bconsole" in files: + print("yes") + sys.exit(0) + print("no") + sys.exit(0) elif "suggest" in sys.argv[1:]: - sys.exit(1) + sys.exit(1) else: - hostname, jobs, clients = parse_running_jobs() - str = [] - for client in clients: - if client[0].split("-")[0] in jobs: - str.append((client[0], client[2])) + hostname, jobs, clients = parse_running_jobs() + client_pairs = [] + for client in clients: + if client[0].split("-")[0] in jobs: + client_pairs.append((client[0], client[2])) - client_values = parse(str) + client_values = parse(client_pairs) - for client in clients: - client_name_short = client[0].split("-")[0] - if client_name_short in client_values: - print "%s.value %s" % (client_values[client_name_short][1], client_values[client_name_short][2]) - else: - print "%s.value %s" % (client[1], "0") + for client in clients: + client_name_short = client[0].split("-")[0] + if client_name_short in client_values: + print("%s.value %s" % (client_values[client_name_short][1], + client_values[client_name_short][2])) + else: + print("%s.value %s" % (client[1], "0")) - sys.exit(0) + sys.exit(0) diff --git a/plugins/bacula/bacula_sd b/plugins/bacula/bacula_sd index 7cb0922d..7d1fc023 100755 --- a/plugins/bacula/bacula_sd +++ b/plugins/bacula/bacula_sd @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # Copyright (C) 2009 Andreas Thienemann # @@ -15,126 +15,129 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # - +# # # Munin Plugin to get storage device throughput for Bacula by parsing the bconsole # output. 
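A related note on the bacula_job conversion above (and the matching bacula_sd change below): with the default binary pipes, Popen.communicate() rejects str input under Python 3, so feeding the plain string "status\n2" to bconsole would still raise a TypeError there. A minimal sketch of the text-mode variant, for illustration only and not part of this patch:

    import subprocess

    # Text-mode pipes let communicate() accept and return str on Python 3 as
    # well; text=True is the 3.7+ spelling of universal_newlines=True.
    bconsole = subprocess.Popen(
        "bconsole",
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        universal_newlines=True,
    )
    stdout, _stderr = bconsole.communicate("status\n2\n")
    for line in stdout.split("\n"):
        if line.startswith('Device "'):
            print(line)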
# # Parameters: # -# config (required) -# autoconf (optional - only used by munin-config) +# config (required) +# autoconf (optional - only used by munin-config) +# # - # Magic markers (optional - only used by munin-config and some # installation scripts): # -#%# family=contrib -#%# capabilities=autoconf +# #%# family=contrib +# #%# capabilities=autoconf import subprocess -import time import sys import re import os + def parse_devices(): - """ Parse the bconsole output once to get the device names """ + """ Parse the bconsole output once to get the device names """ - bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) - stdout, stderr = bconsole.communicate("status\n2") + bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) + stdout, stderr = bconsole.communicate("status\n2") - devs = [] + devs = [] - # Hold the line numbers for devices - dev_line = [] - input = stdout.split("\n") + # Hold the line numbers for devices + dev_line = [] + input_lines = stdout.split("\n") - for line, i in zip(input, range(0, len(input))): - if line.startswith("Connecting to Storage daemon "): - hostname = line.split()[-1].split(":")[0] - if line.startswith("Device \""): - dev_line.append(i) + for line, i in zip(input_lines, range(0, len(input_lines))): + if line.startswith("Connecting to Storage daemon "): + hostname = line.split()[-1].split(":")[0] + if line.startswith("Device \""): + dev_line.append(i) - for pos in dev_line: - # Get the device name - dev_name = input[pos].split()[1][1:-1] - dev_dev = input[pos].split()[2][1:-1] - dev_dev_clean = re.sub("^[^A-Za-z_]", "_", dev_dev, 1) - dev_dev_clean = re.sub("[^A-Za-z0-9_]", "_", dev_dev_clean, 0) - devs.append([dev_name, dev_dev, dev_dev_clean]) + for pos in dev_line: + # Get the device name + dev_name = input_lines[pos].split()[1][1:-1] + dev_dev = input_lines[pos].split()[2][1:-1] + dev_dev_clean = re.sub("^[^A-Za-z_]", "_", dev_dev, 1) + dev_dev_clean = re.sub("[^A-Za-z0-9_]", "_", dev_dev_clean, 0) + devs.append([dev_name, dev_dev, dev_dev_clean]) - return hostname, devs + return hostname, devs def parse(): - """ Parse the bconsole output """ + """ Parse the bconsole output """ - bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) - stdout, stderr = bconsole.communicate("status\n2") + bconsole = subprocess.Popen("bconsole", stdin=subprocess.PIPE, stdout=subprocess.PIPE) + stdout, stderr = bconsole.communicate("status\n2") - devstats = [] + devstats = [] - # Hold the line numbers for devices - dev_line = [] - input = stdout.split("\n") + # Hold the line numbers for devices + dev_line = [] + input_lines = stdout.split("\n") - for line, i in zip(input, range(0, len(input))): - if line.startswith("Device \""): - dev_line.append(i) + for line, i in zip(input_lines, range(0, len(input_lines))): + if line.startswith("Device \""): + dev_line.append(i) - for pos in dev_line: - # Get the device name - dev_dev = input[pos].split()[2][1:-1] - dev_dev_clean = re.sub("^[^A-Za-z_]", "_", dev_dev, 1) - dev_dev_clean = re.sub("[^A-Za-z0-9_]", "_", dev_dev_clean, 0) + for pos in dev_line: + # Get the device name + dev_dev = input_lines[pos].split()[2][1:-1] + dev_dev_clean = re.sub("^[^A-Za-z_]", "_", dev_dev, 1) + dev_dev_clean = re.sub("[^A-Za-z0-9_]", "_", dev_dev_clean, 0) - # Get the current bytes - if input[pos].endswith("is mounted with:"): - bytes = long(input[pos+5].split()[1].split("=")[1].replace(",", "")) - devstats.append([dev_dev, dev_dev_clean, bytes]) - 
else: - devstats.append([dev_dev, dev_dev_clean, 0]) + # Get the current bytes + if input_lines[pos].endswith("is mounted with:"): + bytes_count_text = input_lines[pos + 5].split()[1].split("=")[1].replace(",", "") + try: + bytes_count = long(bytes_count_text) + except NameError: + bytes_count = int(bytes_count_text) + devstats.append([dev_dev, dev_dev_clean, bytes_count]) + else: + devstats.append([dev_dev, dev_dev_clean, 0]) - return devstats + return devstats def print_config(): - hostname, devstats = parse_devices() - print "graph_title Bacula Storage Daemon throughput" - print "graph_vlabel bytes per ${graph_period}" - print "graph_args --base 1024 -l 0" - print "graph_scale yes" - print "graph_info Bacula Storage Daemon througput measurement based on written bytes. This may be somewhat inacurate whenever a tape is changed." - print "graph_category backup" - print "graph_order", - for dev in devstats: - print dev[2], - print - if os.getenv("report_hostname") is not None and \ - os.getenv("report_hostname").upper() in ["YES", "TRUE", "1", "Y"]: - print "host_name", hostname - for dev in devstats: - print "%s.label %s" % (dev[2], dev[1]) - print "%s.type DERIVE" % (dev[2]) - print "%s.min 0" % (dev[2]) -# print "%s.max %s" % (dev[2], str(1024*1024*1024*16)) -# print "%s.cdef up,8,*" (dev[2]) - sys.exit(0) + hostname, devstats = parse_devices() + print("graph_title Bacula Storage Daemon throughput") + print("graph_vlabel bytes per ${graph_period}") + print("graph_args --base 1024 -l 0") + print("graph_scale yes") + print("graph_info Bacula Storage Daemon througput measurement based on written bytes. " + "This may be somewhat inaccurate whenever a tape is changed.") + print("graph_category backup") + print("graph_order", " ".join([dev[2] for dev in devstats])) + print() + if (os.getenv("report_hostname") is not None + and (os.getenv("report_hostname").upper() in ["YES", "TRUE", "1", "Y"])): + print("host_name", hostname) + for dev in devstats: + print("%s.label %s" % (dev[2], dev[1])) + print("%s.type DERIVE" % (dev[2])) + print("%s.min 0" % (dev[2])) +# print("%s.max %s" % (dev[2], str(1024*1024*1024*16))) +# print("%s.cdef up,8,*" (dev[2])) + sys.exit(0) if "config" in sys.argv[1:]: - print_config() + print_config() elif "autoconf" in sys.argv[1:]: - for dir in os.getenv("PATH").split(":"): - for root, dirs, files in os.walk(dir): - if "bconsole" in files: - print "yes" - sys.exit(0) - print "no" - sys.exit(1) + for directory in os.getenv("PATH").split(":"): + for root, dirs, files in os.walk(directory): + if "bconsole" in files: + print("yes") + sys.exit(0) + print("no") + sys.exit(0) elif "suggest" in sys.argv[1:]: - sys.exit(1) + sys.exit(1) else: - for dev in parse(): - print "%s.value %s" % (dev[1], dev[2]) + for dev in parse(): + print("%s.value %s" % (dev[1], dev[2])) diff --git a/plugins/battery/acpi-battery b/plugins/battery/acpi-battery index 147ee3fd..4ea36752 100755 --- a/plugins/battery/acpi-battery +++ b/plugins/battery/acpi-battery @@ -32,11 +32,10 @@ if [ "$1" = "autoconf" ]; then if grep -q 'present.*yes' /proc/acpi/battery/*/info > /dev/null 2>&1; then echo yes - exit 0 else echo "no (battery not detected)" - exit 1 fi + exit 0 fi cd /proc/acpi/battery diff --git a/plugins/battery/acpi_batt_ b/plugins/battery/acpi_batt_ index a03e4b1c..b96578b7 100755 --- a/plugins/battery/acpi_batt_ +++ b/plugins/battery/acpi_batt_ @@ -1,48 +1,70 @@ #!/usr/bin/perl -w -# -*- perl -*- +# -*- perl -*- =head1 NAME -acpi_batt_ Munin plugin to monitor the (note|net)book battery states 
through procfs +acpi_batt_ - monitor the (note|net)book battery states through procfs =head1 APPLICABLE SYSTEMS -Notebooks and netbooks with avialable /proc/acpi/battery +Notebooks and netbooks with available /proc/acpi/battery =head1 CONFIGURATION -Configured change the name of symbolic link +The name of the plugin (or its symbolic link) determines the retrieved data. + +=over + +=item acpi_batt_X_capacity: chart of Design capacity, Last full capacity, Design capacity low, Design capacity warning, Capacity granularity 1, Capacity granularity 2, Remaining capacity, Present rate (mA) + +=item acpi_batt_X_percents: percentage chart of Current voltage, Current capacity, Full capacity (of design) + +=item acpi_batt_X_voltage: chart of Design voltage, Present voltage + +=back -acpi_batt_X_capacity - chart of Design capacity, Last full capacity, Design capacity low, - Design capacity warning, Capacity granularity 1, Capacity granularity 2, - Remaining capacity, Present rate (mA) -acpi_batt_X_percents - percentage chart of Current voltage, Current capacity, Full capacity (of design) -acpi_batt_X_voltage - chart of Design voltage, Present voltage Where X is the number of battery from /proc/acpi/battery/BATX =head1 INTERPRETATION The plugin shows: - Design capacity - Last full capacity - Design capacity low - Design capacity warning - Capacity granularity 1 - Capacity granularity 2 - Remaining capacity - Present rate (mA) - Percentage Current/design voltage - Percentage Full/current capacity - Percentage Design/full capacity - Design voltage - Present voltage + +=over + +=item Design capacity + +=item Last full capacity + +=item Design capacity low + +=item Design capacity warning + +=item Capacity granularity 1 + +=item Capacity granularity 2 + +=item Remaining capacity + +=item Present rate (mA) + +=item Percentage Current/design voltage + +=item Percentage Full/current capacity + +=item Percentage Design/full capacity + +=item Design voltage + +=item Present voltage + +=back + =head1 MAGIC MARKERS -#%# family=power + #%# family=power -=head1 VERSION -=head1 BUGS +=head1 BUGS None known. 
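For context on what acpi_batt_ is parsing: /proc/acpi/battery/BATx/info and .../state are plain "key: value" files, and the percentage graphs are simple ratios of those numbers. A rough Python sketch of that idea, assuming the usual procfs layout (the real plugin is the Perl code in this file):

    import os


    def read_acpi_battery(batt="BAT0"):
        """Collect 'key: value' pairs from /proc/acpi/battery/<batt>/{info,state}."""
        data = {}
        for part in ("info", "state"):
            with open(os.path.join("/proc/acpi/battery", batt, part)) as fh:
                for line in fh:
                    if ":" not in line:
                        continue
                    key, value = line.split(":", 1)
                    fields = value.split()
                    # keep the first field only, e.g. "4400 mAh" -> "4400"
                    data[key.strip()] = fields[0] if fields else ""
        return data


    def percent(total, part):
        """part as a percentage of total, guarding against a zero total."""
        return 100.0 * float(part) / float(total) if float(total) else 0


    battery = read_acpi_battery()
    print("cc.value %d" % percent(battery["design capacity"],
                                  battery["remaining capacity"]))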
@@ -98,8 +120,8 @@ sub read_data { $val = "unknown"; } - elsif ( $var ne "batery type" or $var ne "serial number" or $var ne "OEM info" ) - { + elsif ( $var ne "battery type" or $var ne "serial number" or $var ne "OEM info" ) + { @tmp = split(" " ,$val); $val = trim($tmp[0]); } @@ -120,8 +142,8 @@ if ($ARGV[0] and $ARGV[0] eq "config") { my $batt_name = sprintf("%s %s %s", $batt_data->{'info'}{'OEM info'}, $batt_data->{'info'}{'battery type'}, $batt_data->{'info'}{'model number'}); print ("graph_args --base 1000\n"); - printf ("graph_title Battery %s (%s) %s\n" , $batt_num, $batt_name, $graph_type); - printf ("graph_info This graph shows battery %s (%s) %s\n" , $batt_num, $batt_name, $graph_type); + printf ("graph_title Battery %d (%s) %s\n" , $batt_num, $batt_name, $graph_type); + printf ("graph_info This graph shows battery %d (%s) %s\n" , $batt_num, $batt_name, $graph_type); print ("graph_category sensors\n"); if ($graph_type eq "capacity") { @@ -160,31 +182,23 @@ sub percent if ($graph_type eq "capacity") { - printf ("dc.value %s\n", $batt_data->{'info'}{'design capacity'}); - printf ("lfc.value %s\n", $batt_data->{'info'}{'last full capacity'}); - printf ("dcl.value %s\n", $batt_data->{'info'}{'design capacity low'}); - printf ("dcw.value %s\n", $batt_data->{'info'}{'design capacity warning'}); - printf ("cg1.value %s\n", $batt_data->{'info'}{'capacity granularity 1'}); - printf ("cg2.value %s\n", $batt_data->{'info'}{'capacity granularity 2'}); - printf ("rc.value %s\n", $batt_data->{'state'}{'remaining capacity'}); - printf ("pr.value %s\n", $batt_data->{'state'}{'present rate'}); + printf ("dc.value %d\n", $batt_data->{'info'}{'design capacity'}); + printf ("lfc.value %d\n", $batt_data->{'info'}{'last full capacity'}); + printf ("dcl.value %d\n", $batt_data->{'info'}{'design capacity low'}); + printf ("dcw.value %d\n", $batt_data->{'info'}{'design capacity warning'}); + printf ("cg1.value %d\n", $batt_data->{'info'}{'capacity granularity 1'}); + printf ("cg2.value %d\n", $batt_data->{'info'}{'capacity granularity 2'}); + printf ("rc.value %d\n", $batt_data->{'state'}{'remaining capacity'}); + printf ("pr.value %d\n", $batt_data->{'state'}{'present rate'}); } elsif ($graph_type eq "voltage") { - printf ("d.value %s\n", $batt_data->{'info'}{'design voltage'}); - printf ("p.value %s\n", $batt_data->{'state'}{'present voltage'}); + printf ("d.value %d\n", $batt_data->{'info'}{'design voltage'}); + printf ("p.value %d\n", $batt_data->{'state'}{'present voltage'}); } elsif ($graph_type eq "percents") { - printf ("cv.value %s\n", percent($batt_data->{'info'}{'design voltage'},$batt_data->{'state'}{'present voltage'})); - printf ("cc.value %s\n", percent($batt_data->{'info'}{'design capacity'},$batt_data->{'state'}{'remaining capacity'})); - printf ("fc.value %s\n", percent($batt_data->{'info'}{'design capacity'},$batt_data->{'info'}{'last full capacity'})); + printf ("cv.value %d\n", percent($batt_data->{'info'}{'design voltage'},$batt_data->{'state'}{'present voltage'})); + printf ("cc.value %d\n", percent($batt_data->{'info'}{'design capacity'},$batt_data->{'state'}{'remaining capacity'})); + printf ("fc.value %d\n", percent($batt_data->{'info'}{'design capacity'},$batt_data->{'info'}{'last full capacity'})); } - - - - - - - - diff --git a/plugins/battery/acpi_sys_batt_ b/plugins/battery/acpi_sys_batt_ index e8937610..60c87c3b 100755 --- a/plugins/battery/acpi_sys_batt_ +++ b/plugins/battery/acpi_sys_batt_ @@ -1,5 +1,5 @@ #!/usr/bin/perl -w -# -*- perl -*- +# -*- perl -*- =head1 NAME @@ 
-7,7 +7,7 @@ acpi_sys_batt_ Munin plugin to monitor the (note|net)book battery states through =head1 APPLICABLE SYSTEMS -Notebooks and netbooks with avialable /sys/class/power_supply +Notebooks and netbooks with available /sys/class/power_supply =head1 CONFIGURATION @@ -36,8 +36,8 @@ The plugin shows: #%# family=power -=head1 VERSION -=head1 BUGS +=head1 VERSION +=head1 BUGS None known. @@ -63,7 +63,7 @@ if ($0 =~ /^(?:|.*\/)acpi_sys_batt_([^_]+)_(.+)$/) elsif (!defined($batt_num) or !defined($graph_type)) { die "# Error: couldn't understand what I'm supposed to monitor."; } -my $sys_path=sprintf("/sys/class/power_supply/BAT%s", $batt_num); +my $sys_path=sprintf("/sys/class/power_supply/BAT%d", $batt_num); #print "$batt_num, $graph_type \n"; @@ -97,8 +97,8 @@ if ($ARGV[0] and $ARGV[0] eq "config") my $batt_name = sprintf("%s %s %s %s (sn: %s)", cat_file('technology'), cat_file('type'), cat_file('manufacturer'), cat_file('model_name'), cat_file('serial_number')); print ("graph_args --base 1000\n"); - printf ("graph_title Battery %s (%s) %s\n" , $batt_num, $batt_name, $graph_type); - printf ("graph_info This graph shows battery %s (%s) %s\n" , $batt_num, $batt_name, $graph_type); + printf ("graph_title Battery %d (%s) %s\n" , $batt_num, $batt_name, $graph_type); + printf ("graph_info This graph shows battery %d (%s) %s\n" , $batt_num, $batt_name, $graph_type); print ("graph_category sensors\n"); if ($graph_type eq "capacity") { @@ -144,24 +144,22 @@ sub percent if ($graph_type eq "capacity") { - printf ("cfd.value %s\n", cat_file('charge_full_design')); - printf ("cf.value %s\n", cat_file('charge_full')); - printf ("cn.value %s\n", cat_file('charge_now')); + printf ("cfd.value %d\n", cat_file('charge_full_design')); + printf ("cf.value %d\n", cat_file('charge_full')); + printf ("cn.value %d\n", cat_file('charge_now')); } elsif ($graph_type eq "voltage") { - printf ("vmd.value %s\n", cat_file('voltage_min_design')); - printf ("vn.value %s\n", cat_file('voltage_now')); + printf ("vmd.value %d\n", cat_file('voltage_min_design')); + printf ("vn.value %d\n", cat_file('voltage_now')); } elsif ($graph_type eq "current") { - printf ("currn.value %s\n", cat_file('current_now')); + printf ("currn.value %d\n", cat_file('current_now')); } elsif ($graph_type eq "percents") { - printf ("cv.value %s\n", percent(cat_file('voltage_min_design'),cat_file('voltage_now'))); - printf ("cc.value %s\n", percent(cat_file('charge_full'),cat_file('charge_now'))); - printf ("fc.value %s\n", percent(cat_file('charge_full_design'),cat_file('charge_full'))); + printf ("cv.value %d\n", percent(cat_file('voltage_min_design'),cat_file('voltage_now'))); + printf ("cc.value %d\n", percent(cat_file('charge_full'),cat_file('charge_now'))); + printf ("fc.value %d\n", percent(cat_file('charge_full_design'),cat_file('charge_full'))); } - - diff --git a/plugins/battery/batteries b/plugins/battery/batteries index 9df76c4e..c8352fe5 100755 --- a/plugins/battery/batteries +++ b/plugins/battery/batteries @@ -1,5 +1,5 @@ #!/usr/bin/perl -w -# -*- perl -*- +# -*- perl -*- =head1 NAME @@ -7,7 +7,7 @@ batteries Munin plugin to monitor the battery states through procfs and sysfs =head1 APPLICABLE SYSTEMS -Systems with avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx +Systems with available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx =head1 CONFIGURATION @@ -16,30 +16,30 @@ none =head1 INTERPRETATION The plugin shows: - Design capacity -> avialable if avialable /proc/acpi/battery/BATx or 
/sys/class/power_supply/BATx - Last full capacity -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Design capacity low -> avialable only if avialable /proc/acpi/battery/BATx - Design capacity warning -> avialable only if avialable /proc/acpi/battery/BATx - Capacity granularity 1 -> avialable only if avialable /proc/acpi/battery/BATx - Capacity granularity 2 -> avialable only if avialable /proc/acpi/battery/BATx - Remaining capacity -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Present rate -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Percentage Current/design voltage -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Percentage Current/full capacity -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Percentage Full/design capacity -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Design voltage -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx - Present voltage -> avialable if avialable /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Design capacity -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Last full capacity -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Design capacity low -> available only if available /proc/acpi/battery/BATx + Design capacity warning -> available only if available /proc/acpi/battery/BATx + Capacity granularity 1 -> available only if available /proc/acpi/battery/BATx + Capacity granularity 2 -> available only if available /proc/acpi/battery/BATx + Remaining capacity -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Present rate -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Percentage Current/design voltage -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Percentage Current/full capacity -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Percentage Full/design capacity -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Design voltage -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx + Present voltage -> available if available /proc/acpi/battery/BATx or /sys/class/power_supply/BATx =head1 MAGIC MARKERS #%# family=power #%# capabilities=autoconf -=head1 VERSION +=head1 VERSION 1.0 -=head1 BUGS +=head1 BUGS None known. 
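
Where only the newer sysfs interface exists, the figures listed above can be read straight from the per-battery attribute files. A minimal sketch of that, and of the percentage fields, assuming a charge-reporting battery at /sys/class/power_supply/BAT0 (some models expose energy_* rather than charge_* attributes):

#!/usr/bin/env python3
"""Illustration only: read the same figures from sysfs and compute the
percentage fields the way the plugin's percent() helper is described.
Assumes a charge-reporting battery at BAT0; energy_* based models differ."""
import os

BAT = "/sys/class/power_supply/BAT0"


def read_attr(name):
    """Return an integer attribute from the battery directory, or None."""
    try:
        with open(os.path.join(BAT, name)) as handle:
            return int(handle.read().strip())
    except (OSError, ValueError):
        return None


def percent(design, current):
    """Current value as a percentage of the design value."""
    if not design or current is None:
        return None
    return 100.0 * current / design


charge_full_design = read_attr("charge_full_design")
charge_full = read_attr("charge_full")
charge_now = read_attr("charge_now")

print("remaining capacity:", charge_now)
print("current/full capacity %:", percent(charge_full, charge_now))
print("full/design capacity %:", percent(charge_full_design, charge_full))
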
@@ -219,7 +219,7 @@ sub read_data } -my $graphs = +my $graphs = { 'batteryes_capacity' => { 'vlabel' => 'Capacity, Ah', 'title' => '%s capacity', 'args' => '--base 1000', 'fields' => [qw/design_capacity last_full_capacity design_capacity_low design_capacity_warning capacity_granularity_1 capacity_granularity_2 remaining_capacity/] }, @@ -231,7 +231,7 @@ my $graphs = 'fields' => [qw/present_rate/] } }; -my $fields = +my $fields = { 'design_capacity' => { 'source' => 'both', 'draw' => 'AREA' , 'label' => 'Design capacity' , 'info' => 'Battery design capacity' }, 'last_full_capacity' => { 'source' => 'both', 'draw' => 'AREA' , 'label' => 'Last full capacity' , 'info' => 'Battery full charge capacity' }, @@ -250,7 +250,7 @@ my $fields = # ------------------------------------ start here ----------------------------------- -if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) +if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) { printf("%s\n", (-e $proc_path or -e $sys_path) ? "yes" : "no ($proc_path and $sys_path not exists)"); exit (0); @@ -268,7 +268,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") $config{$graph}{'graph'}{'title'} = sprintf($graphs->{$graph}{'title'}, 'Mean batteryes'); $config{$graph}{'graph'}{'args'} = $graphs->{$graph}{'args'}; $config{$graph}{'graph'}{'vlabel'} = $graphs->{$graph}{'vlabel'}; - $config{$graph}{'graph'}{'category'} = 'power'; + $config{$graph}{'graph'}{'category'} = 'sensors'; foreach my $field (@{$graphs->{$graph}{'fields'}}) { if(($proc_data_exists and $fields->{$field}{'source'} eq 'proc') or $fields->{$field}{'source'} eq 'both') @@ -290,7 +290,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") $config{$graph_name}{'graph'}{'info'} = sprintf("%s battery %s %s (sn: %s)", $info->{$i}{'battery_type'}, $info->{$i}{'manufacturer'}, $info->{$i}{'model_name'}, $info->{$i}{'serial_number'}); $config{$graph_name}{'graph'}{'args'} = '--base 1000'; $config{$graph_name}{'graph'}{'vlabel'} = $graphs->{$graph}{'vlabel'}; - $config{$graph_name}{'graph'}{'category'} = 'power'; + $config{$graph_name}{'graph'}{'category'} = 'sensors'; foreach my $field (@{$graphs->{$graph}{'fields'}}) { if(($proc_data_exists and $fields->{$field}{'source'} eq 'proc') or $fields->{$field}{'source'} eq 'both') diff --git a/plugins/bigbrother/b3error_ b/plugins/bigbrother/b3error_ index 23741fe2..23b5dab1 100755 --- a/plugins/bigbrother/b3error_ +++ b/plugins/bigbrother/b3error_ @@ -23,13 +23,12 @@ STATEFILE=$MUNIN_PLUGSTATE/${B3NAME}.offset mktempfile () { mktemp -t $1 -} +} if [ "$1" = "autoconf" ]; then if [ -n "${B3NAME}" -a -f "${logfile}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ]; then echo yes - exit 0 else echo -n "no" if [ ! -n "${B3NAME}" ]; then @@ -43,13 +42,13 @@ if [ "$1" = "autoconf" ]; then elif [ ! -x "${LOGTAIL}" ]; then echo " (cannot execute ${LOGTAIL})" fi - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then echo "graph_title BigBrotherBot errors - ${B3NAME}" - cat <<'EOM' + cat <<'EOM' graph_args --base 1000 -l 0 graph_vlabel Errors graph_category Games diff --git a/plugins/bind/bind95_ b/plugins/bind/bind95_ index bf42fe81..1d589df0 100755 --- a/plugins/bind/bind95_ +++ b/plugins/bind/bind95_ @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Copyright Jean-Samuel Reynaud -# Licenced under GPL v2 +# Licensed under GPL v2 # # We use this script to produce graph with munin for dns requests # This script must have his name start with bind95_ @@ -122,12 +122,12 @@ if ( defined($ARGV[0]) && $ARGV[0] eq "config" ) { if ( defined($ARGV[0]) && $ARGV[0] eq "autoconf" ) { if (! 
-f $stat_file) { - printf "Unable to file bind stat file on %s",$stat_file; - exit 1; + printf "no (Unable to file bind stat file on %s)",$stat_file; + exit 0; } if (! -f $rndc) { - printf "Unable to file rndc tool (configured : %s)",$rndc; - exit 1; + printf "no (Unable to file rndc tool (configured : %s))",$rndc; + exit 0; } exit 0; } diff --git a/plugins/bind/bind9_resolver_stats b/plugins/bind/bind9_resolver_stats index 86e410d5..80f20373 100755 --- a/plugins/bind/bind9_resolver_stats +++ b/plugins/bind/bind9_resolver_stats @@ -72,9 +72,7 @@ if [ "$1" = "config" ]; then # If dirty config capability is enabled then fall through # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output the stats. diff --git a/plugins/bind/bind9_rr b/plugins/bind/bind9_rr index 1e29be03..e510f556 100755 --- a/plugins/bind/bind9_rr +++ b/plugins/bind/bind9_rr @@ -84,7 +84,7 @@ sub do_stats { if ($3 eq 'IN' and $4 !~ /^TYPE/) { my $crr = lc $2; $IN{$crr}++; - } + } } } close(Q); diff --git a/plugins/bind/bind9_server_stats b/plugins/bind/bind9_server_stats index 4d495982..3c211e3c 100755 --- a/plugins/bind/bind9_server_stats +++ b/plugins/bind/bind9_server_stats @@ -69,11 +69,9 @@ if [ "$1" = "config" ]; then echo ${key}.type COUNTER done - # If dirty config capability is enabled then fall through - # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + # If dirty config capability is enabled then fall through + # to output the data with the config information. + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output the stats. diff --git a/plugins/bind/bind9_socket_stats b/plugins/bind/bind9_socket_stats index be742f33..f84261ed 100755 --- a/plugins/bind/bind9_socket_stats +++ b/plugins/bind/bind9_socket_stats @@ -67,11 +67,9 @@ if [ "$1" = "config" ]; then echo ${key}.type COUNTER done - # If dirty config capability is enabled then fall through - # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + # If dirty config capability is enabled then fall through + # to output the data with the config information. + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output the stats. diff --git a/plugins/bind/bind_ b/plugins/bind/bind_ index 92fd8759..bf2b8fe4 100755 --- a/plugins/bind/bind_ +++ b/plugins/bind/bind_ @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Copyright Jean-Samuel Reynaud -# Licenced under GPL v2 +# Licensed under GPL v2 # # We use this script to produce graph with munin for dns requests # This script must have his name start with bind_ @@ -98,11 +98,11 @@ if ( defined($ARGV[0]) && $ARGV[0] eq "config" ) { if ( defined($ARGV[0]) && $ARGV[0] eq "autoconf" ) { if (! -f $stat_file) { printf "Unable to file bind stat file on %s",$stat_file; - exit 1; + exit 0; } if (! 
-f $rndc) { printf "Unable to file rndc tool (configured : %s)",$rndc; - exit 1; + exit 0; } exit 0; } diff --git a/plugins/bird/bird b/plugins/bird/bird index b9c37926..b6aa4e15 100755 --- a/plugins/bird/bird +++ b/plugins/bird/bird @@ -111,11 +111,10 @@ sub get_stats { sub autoconf { if (-S $socket) { say 'yes'; - exit 0; } else { say 'no'; - exit 1; } + exit 0; } sub config { diff --git a/plugins/boinc/boinc_estwk b/plugins/boinc/boinc_estwk index 0042f19e..47d4cb00 100755 --- a/plugins/boinc/boinc_estwk +++ b/plugins/boinc/boinc_estwk @@ -288,7 +288,7 @@ Linux machines running BOINC and munin-node - or - -Linux servers (running munin-node) used to collect data from other systems +Linux servers (running munin-node) used to collect data from other systems which are running BOINC, but not running munin-node (e.g. non-Linux systems) =head1 CONFIGURATION @@ -319,48 +319,48 @@ Warning level - minimum estimated work (default: 24.00 hours) =item B -Password for BOINC (default: none) +Password for BOINC (default: none) =back =head2 B -Using of variable B poses a security risk. Even if the Munin -configuration file for this plugin containing BOINC-password is properly -protected, the password is exposed as environment variable and finally passed -to boinccmd as a parameter. It is therefore possible for local users of the -machine running this plugin to eavesdrop the BOINC password. +Using of variable B poses a security risk. Even if the Munin +configuration file for this plugin containing BOINC-password is properly +protected, the password is exposed as environment variable and finally passed +to boinccmd as a parameter. It is therefore possible for local users of the +machine running this plugin to eavesdrop the BOINC password. -Using of variable password is therefore strongly discouraged and is left here +Using of variable password is therefore strongly discouraged and is left here as a legacy option and for testing purposes. -It should be always possible to use B variable instead - in such case -the file gui_rpc_auth.cfg is read by boinccmd binary directly. -If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg -can be copied to special directory in a secure way (e.g. via scp) and properly +It should be always possible to use B variable instead - in such case +the file gui_rpc_auth.cfg is read by boinccmd binary directly. +If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg +can be copied to special directory in a secure way (e.g. via scp) and properly protected by file permissions. =head1 INTERPRETATION -This plugin shows the estimated remaining computation time for all CPUs of +This plugin shows the estimated remaining computation time for all CPUs of the machine and the estimated remaining computation time of longest workunit. -The estimation is based on assumption that the workunits of different lengths +The estimation is based on assumption that the workunits of different lengths will be distributed to the CPUs evenly (which is not always the case). -The warning level can be used to warn in forward about the risk of workunits +The warning level can be used to warn in forward about the risk of workunits local cache depletion and BOINC client running out of the work. -Although such warning can be achieved by configuring Munin master, there is +Although such warning can be achieved by configuring Munin master, there is also this option to configure it on munin-node side. 
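
As a back-of-the-envelope check of that estimate, consider the situation used in the BUGS section below: four 1-hour and three 4-hour workunits on a 4-CPU host. Spreading the remaining seconds evenly over the CPUs gives the 4-hour figure the plugin would report. The field names in this sketch are placeholders, not necessarily the plugin's own:

# Toy model of the estimate described above; the real plugin takes the
# per-task remaining times from the BOINC client instead of a literal list.
remaining_seconds = [3600] * 4 + [14400] * 3   # 4 x 1 h, 3 x 4 h (see BUGS below)
cpus = 4
warning_hours = 24.0                           # default warning level

est_per_cpu_hours = sum(remaining_seconds) / cpus / 3600.0   # -> 4.0
longest_wu_hours = max(remaining_seconds) / 3600.0           # -> 4.0

print("per_cpu.value %.2f" % est_per_cpu_hours)    # placeholder field name
print("longest.value %.2f" % longest_wu_hours)     # placeholder field name
if est_per_cpu_hours < warning_hours:
    print("# below the warning level - local cache close to depletion")
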
=head1 EXAMPLES =head2 Local BOINC Example -BOINC is running on local machine. The BOINC binaries are installed in +BOINC is running on local machine. The BOINC binaries are installed in F, the BOINC is running in directory -F under username boinc, group boinc and the password is used +F under username boinc, group boinc and the password is used to protect access to BOINC. -Warning will be set when estimated work for any of CPUs will decrease under +Warning will be set when estimated work for any of CPUs will decrease under 48 hours: [boinc_*] @@ -371,17 +371,17 @@ Warning will be set when estimated work for any of CPUs will decrease under =head2 Remote BOINC Example -BOINC is running on 2 remote machines C and C. -On the local machine the binary of command-line interface is installed in +BOINC is running on 2 remote machines C and C. +On the local machine the binary of command-line interface is installed in directory F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -These files are owned and readable by root, readable by group munin and not -readable by others. -There are 2 symbolic links to this plugin created in the munin plugins -directory (usually F): F and +These files are owned and readable by root, readable by group munin and not +readable by others. +There are 2 symbolic links to this plugin created in the munin plugins +directory (usually F): F and F [snmp_foo_boinc*] @@ -396,33 +396,33 @@ F env.host bar env.boincdir /etc/munin/boinc/bar -This way the plugin can be used by Munin the same way as the Munin plugins +This way the plugin can be used by Munin the same way as the Munin plugins utilizng SNMP (although this plugin itself does not use SNMP). =head1 BUGS -The estimation is based on simple assumption, that longest workunits will be -processed first. This is the case when work is distributed evenly among CPUs. -But this is not always the case, because various deadlines for various -workunits may fire the "panic mode" of BOINC and scheduling could be much -different. -For example, there can be 4 CPUs, and BOINC having downloaded 4 workunits -with estimated run-time 1 hour each and 3 workunits with estimated run-time -4 hours each. +The estimation is based on simple assumption, that longest workunits will be +processed first. This is the case when work is distributed evenly among CPUs. +But this is not always the case, because various deadlines for various +workunits may fire the "panic mode" of BOINC and scheduling could be much +different. +For example, there can be 4 CPUs, and BOINC having downloaded 4 workunits +with estimated run-time 1 hour each and 3 workunits with estimated run-time +4 hours each. This Munin plugin will report estimated work 4 hours for each CPU. -But if deadline of those 1-hour workunits will be much shorter than deadline -of those 4-hours workunits, BOINC will schedule short workunits first (for all +But if deadline of those 1-hour workunits will be much shorter than deadline +of those 4-hours workunits, BOINC will schedule short workunits first (for all 4 CPUs) and after finishing them it will schedule those long workunits. This will result in real computation for 5 hours on 3 CPUs but only 1 hour on -remaining 4th CPU. So after 1 hour of computation 1 of CPUs will run out of +remaining 4th CPU. 
So after 1 hour of computation 1 of CPUs will run out of work. -There is no C capability at the moment. This is due to the fact, that -BOINC installations may vary over different systems, sometimes using default -directory from distribution (e.g. F in Debian or Ubuntu), but +There is no C capability at the moment. This is due to the fact, that +BOINC installations may vary over different systems, sometimes using default +directory from distribution (e.g. F in Debian or Ubuntu), but often running in user directories or in other separate directories. -Also the user-ID under which BOINC runs often differs. -Under these circumstances the C would be either lame or too +Also the user-ID under which BOINC runs often differs. +Under these circumstances the C would be either lame or too complicated. =head1 AUTHOR diff --git a/plugins/boinc/boinc_processes b/plugins/boinc/boinc_processes index adf6f584..0d06d1b7 100755 --- a/plugins/boinc/boinc_processes +++ b/plugins/boinc/boinc_processes @@ -5,12 +5,12 @@ # Parameters: # # password -- The password for RPC authentication -# (default: boinc_cmd will look for a file +# (default: boinc_cmd will look for a file # 'gui_rpc_auth.cfg' and use the password in it) # host -- the host to connect to (default: localhost) # port -- optional (default: 31416) # -# This plugin can monitor boinc processes running on local/remote machines. +# This plugin can monitor boinc processes running on local/remote machines. # You can see the progress on various projects. # # Author: Petr Ruzicka @@ -49,50 +49,50 @@ sub autoconf { $reply =~ /(.*)<\/nonce>/; $hash = md5_hex($1, $password); - + print $client "$hash\003"; - + { local $/ = "\003"; $reply = <$client>; } - + if ($reply =~ //) { print "yes\n"; exit 0; } } print "no\n"; - exit 1; + exit 0; } sub config { my $client = IO::Socket::INET->new ( PeerAddr => $host, PeerPort => $port, Proto => 'tcp' ) - or die "Can't bind : $@\n"; + or die "Can't bind : $@\n"; print $client "\003"; - { + { local $/ = "\003"; $reply = <$client>; } $reply =~ /(.*)<\/nonce>/; my $hash = md5_hex($1, $password); - + print $client "$hash\003"; - + { local $/ = "\003"; $reply = <$client>; } - + if ($reply !~ //) { die "Wrong password: $_"; } - + print $client ""; while (chomp($reply = <$client>) && ($reply ne "")) { @@ -101,7 +101,7 @@ sub config { print "graph_category htc\n"; print "graph_args -l 0\n"; print "graph_vlabel %\n"; - } + } if ($reply =~ /(.*)<\/project_name>/) { my $boinc_munin_name=$1; $boinc_munin_name =~ /(\w+).*/; @@ -115,31 +115,31 @@ sub report { my $client = IO::Socket::INET->new ( PeerAddr => $host, PeerPort => $port, Proto => 'tcp' ) - or die "Can't bind : $@\n"; + or die "Can't bind : $@\n"; print $client "\003"; - { + { local $/ = "\003"; $reply = <$client>; } $reply =~ /(.*)<\/nonce>/; my $hash = md5_hex($1, $password); - + print $client "$hash\003"; - + { local $/ = "\003"; $reply = <$client>; } - + if ($reply !~ //) { die "Wrong password: $_"; } - + print $client ""; - + while (chomp($reply = <$client>) && ($reply ne "")) { if ($reply =~ /(\w+).*<\/project_name>/) { $project = $1; diff --git a/plugins/boinc/boinc_projs b/plugins/boinc/boinc_projs index 25ee64b6..352397c1 100755 --- a/plugins/boinc/boinc_projs +++ b/plugins/boinc/boinc_projs @@ -279,7 +279,7 @@ Linux machines running BOINC and munin-node - or - -Linux servers (running munin-node) used to collect data from other systems +Linux servers (running munin-node) used to collect data from other systems which are running BOINC, but not running munin-node (e.g. 
non-Linux systems) =head1 CONFIGURATION @@ -306,40 +306,40 @@ Directory containing appropriate file gui_rpc_auth.cfg (default: none) =item B -Password for BOINC (default: none) +Password for BOINC (default: none) =back =head2 B -Using of variable B poses a security risk. Even if the Munin -configuration file for this plugin containing BOINC-password is properly -protected, the password is exposed as environment variable and finally passed -to boinccmd as a parameter. It is therefore possible for local users of the -machine running this plugin to eavesdrop the BOINC password. +Using of variable B poses a security risk. Even if the Munin +configuration file for this plugin containing BOINC-password is properly +protected, the password is exposed as environment variable and finally passed +to boinccmd as a parameter. It is therefore possible for local users of the +machine running this plugin to eavesdrop the BOINC password. -Using of variable password is therefore strongly discouraged and is left here +Using of variable password is therefore strongly discouraged and is left here as a legacy option and for testing purposes. -It should be always possible to use B variable instead - in such case -the file gui_rpc_auth.cfg is read by boinccmd binary directly. -If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg -can be copied to special directory in a secure way (e.g. via scp) and properly +It should be always possible to use B variable instead - in such case +the file gui_rpc_auth.cfg is read by boinccmd binary directly. +If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg +can be copied to special directory in a secure way (e.g. via scp) and properly protected by file permissions. =head1 INTERPRETATION -This plugin shows the number of currently running BOINC tasks on the machine. -If machine is attached to several BOINC projects, data for all these projects +This plugin shows the number of currently running BOINC tasks on the machine. +If machine is attached to several BOINC projects, data for all these projects are displayed. =head1 EXAMPLES =head2 Local BOINC Example -BOINC is running on local machine. The BOINC binaries are installed in +BOINC is running on local machine. The BOINC binaries are installed in F, the BOINC is running in directory -F under username boinc, group boinc and the password is used +F under username boinc, group boinc and the password is used to protect access to BOINC: [boinc_*] @@ -349,17 +349,17 @@ to protect access to BOINC: =head2 Remote BOINC Example -BOINC is running on 2 remote machines C and C. -On the local machine the binary of command-line interface is installed in +BOINC is running on 2 remote machines C and C. +On the local machine the binary of command-line interface is installed in directory F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -These files are owned and readable by root, readable by group munin and not -readable by others. -There are 2 symbolic links to this plugin created in the munin plugins -directory (usually F): F and +These files are owned and readable by root, readable by group munin and not +readable by others. 
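
As an aside on the boincdir route mentioned above: it amounts to pointing boinccmd at a directory whose gui_rpc_auth.cfg holds the RPC password (on its first line, per BOINC convention). A minimal sketch of reading it the same way, using one of the example directories from the configuration snippets (illustrative only):

#!/usr/bin/env python3
"""Illustration only: fetch the GUI RPC password from a protected per-host
directory as described above.  The directory name matches the example
configuration and should be treated as a placeholder."""
import os

boincdir = os.environ.get("boincdir", "/etc/munin/boinc/foo")

with open(os.path.join(boincdir, "gui_rpc_auth.cfg")) as handle:
    password = handle.readline().strip()

# Never print the password itself; just confirm something was read.
print("gui_rpc_auth.cfg found, %d-character password" % len(password))
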
+There are 2 symbolic links to this plugin created in the munin plugins +directory (usually F): F and F [boincprojs_foo] @@ -379,12 +379,12 @@ SNMP and IPMI plugins. =head1 BUGS -There is no C capability at the moment. This is due to the fact, that -BOINC installations may vary over different systems, sometimes using default -directory from distribution (e.g. F in Debian or Ubuntu), but +There is no C capability at the moment. This is due to the fact, that +BOINC installations may vary over different systems, sometimes using default +directory from distribution (e.g. F in Debian or Ubuntu), but often running in user directories or in other separate directories. -Also the user-ID under which BOINC runs often differs. -Under these circumstances the C would be either lame or too +Also the user-ID under which BOINC runs often differs. +Under these circumstances the C would be either lame or too complicated. =head1 AUTHOR diff --git a/plugins/boinc/boinc_wus b/plugins/boinc/boinc_wus index 3c9515cc..d5a4b5dd 100755 --- a/plugins/boinc/boinc_wus +++ b/plugins/boinc/boinc_wus @@ -317,7 +317,7 @@ Linux machines running BOINC and munin-node - or - -Linux servers (running munin-node) used to collect data from other systems +Linux servers (running munin-node) used to collect data from other systems which are running BOINC, but not running munin-node (e.g. non-Linux systems) =head1 CONFIGURATION @@ -348,45 +348,45 @@ Display unusual states details (default: 0 = Summarize unusual states as C -Password for BOINC (default: none) +Password for BOINC (default: none) =back =head2 B -Using of variable B poses a security risk. Even if the Munin -configuration file for this plugin containing BOINC-password is properly -protected, the password is exposed as environment variable and finally passed -to boinccmd as a parameter. It is therefore possible for local users of the -machine running this plugin to eavesdrop the BOINC password. +Using of variable B poses a security risk. Even if the Munin +configuration file for this plugin containing BOINC-password is properly +protected, the password is exposed as environment variable and finally passed +to boinccmd as a parameter. It is therefore possible for local users of the +machine running this plugin to eavesdrop the BOINC password. -Using of variable password is therefore strongly discouraged and is left here +Using of variable password is therefore strongly discouraged and is left here as a legacy option and for testing purposes. -It should be always possible to use B variable instead - in such case -the file gui_rpc_auth.cfg is read by boinccmd binary directly. -If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg -can be copied to special directory in a secure way (e.g. via scp) and properly +It should be always possible to use B variable instead - in such case +the file gui_rpc_auth.cfg is read by boinccmd binary directly. +If this plugin is used to fetch data from remote system, the gui_rpc_auth.cfg +can be copied to special directory in a secure way (e.g. via scp) and properly protected by file permissions. =head1 INTERPRETATION -This plugin shows how many BOINC workunits are in all the various states. -The most important states C, C, C, -C, C, C and C are always +This plugin shows how many BOINC workunits are in all the various states. +The most important states C, C, C, +C, C, C and C are always displayed. All other states are shown as C. 
-If the variable B is used, additionally also states -C and C are shown separately (they are included in -C otherwise). +If the variable B is used, additionally also states +C and C are shown separately (they are included in +C otherwise). =head1 EXAMPLES =head2 Local BOINC Example -BOINC is running on local machine. The BOINC binaries are installed in +BOINC is running on local machine. The BOINC binaries are installed in F, the BOINC is running in directory -F under username boinc, group boinc and the password is used +F under username boinc, group boinc and the password is used to protect access to BOINC: [boinc_*] @@ -397,17 +397,17 @@ to protect access to BOINC: =head2 Remote BOINC Example -BOINC is running on 2 remote machines C and C. -On the local machine the binary of command-line interface is installed in +BOINC is running on 2 remote machines C and C. +On the local machine the binary of command-line interface is installed in directory F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -The BOINC password used on the remote machine C is stored in file +The BOINC password used on the remote machine C is stored in file F. -These files are owned and readable by root, readable by group munin and not -readable by others. -There are 2 symbolic links to this plugin created in the munin plugins -directory (usually F): F and +These files are owned and readable by root, readable by group munin and not +readable by others. +There are 2 symbolic links to this plugin created in the munin plugins +directory (usually F): F and F [snmp_foo_boinc*] @@ -422,17 +422,17 @@ F env.host bar env.boincdir /etc/munin/boinc/bar -This way the plugin can be used by Munin the same way as the Munin plugins +This way the plugin can be used by Munin the same way as the Munin plugins utilizng SNMP (although this plugin itself does not use SNMP). =head1 BUGS -There is no C capability at the moment. This is due to the fact, that -BOINC installations may vary over different systems, sometimes using default -directory from distribution (e.g. F in Debian or Ubuntu), but +There is no C capability at the moment. This is due to the fact, that +BOINC installations may vary over different systems, sometimes using default +directory from distribution (e.g. F in Debian or Ubuntu), but often running in user directories or in other separate directories. -Also the user-ID under which BOINC runs often differs. -Under these circumstances the C would be either lame or too +Also the user-ID under which BOINC runs often differs. +Under these circumstances the C would be either lame or too complicated. 
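
Behind all of these BOINC plugins sits the client's GUI RPC interface. boinc_processes, earlier in this patch, talks to it directly instead of calling boinccmd: it sends an auth1 request, hashes the returned nonce together with the password, and submits that digest in an auth2 request. A rough sketch of that exchange follows; the default port 31416, the md5(nonce + password) digest and the \x03 record terminator are taken from that plugin, while the exact XML tag names follow the published GUI RPC protocol and should be treated as assumptions here:

#!/usr/bin/env python3
"""Sketch of the GUI RPC authentication handshake; illustration only."""
import hashlib
import re
import socket

HOST, PORT = "127.0.0.1", 31416
PASSWORD = "secret"          # normally taken from gui_rpc_auth.cfg


def rpc(sock, body):
    """Send one \\x03-terminated request and read the \\x03-terminated reply."""
    sock.sendall(b"<boinc_gui_rpc_request>" + body + b"</boinc_gui_rpc_request>\x03")
    reply = b""
    while not reply.endswith(b"\x03"):
        chunk = sock.recv(4096)
        if not chunk:
            break
        reply += chunk
    return reply.decode("utf-8", "replace")


with socket.create_connection((HOST, PORT)) as sock:
    nonce = re.search(r"<nonce>(.*)</nonce>", rpc(sock, b"<auth1/>")).group(1)
    digest = hashlib.md5((nonce + PASSWORD).encode()).hexdigest()
    reply = rpc(sock, ("<auth2><nonce_hash>%s</nonce_hash></auth2>" % digest).encode())
    print("authorized" if "<authorized/>" in reply else "unauthorized")
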
=head1 AUTHOR diff --git a/plugins/bsd/df_abs_bsd b/plugins/bsd/df_abs_bsd index 1c40bd57..ecbe233a 100755 --- a/plugins/bsd/df_abs_bsd +++ b/plugins/bsd/df_abs_bsd @@ -57,7 +57,7 @@ graph_info Filesystem usage for i in range(1,len(rawdata)): dataline=rawdata[i].split() self.parsed_data[re.sub('/', '_', dataline[0])] = (int(dataline[1]), int(dataline[2]), dataline[5]) - + if __name__ == "__main__": processor = checker() processor.get_data() diff --git a/plugins/bsd/df_bsd b/plugins/bsd/df_bsd index 49390ab5..a2119c37 100755 --- a/plugins/bsd/df_bsd +++ b/plugins/bsd/df_bsd @@ -55,7 +55,7 @@ graph_info Filesystem usage for i in range(1,len(rawdata)): dataline=rawdata[i].split() self.parsed_data[re.sub('/', '_', dataline[0])] = (re.sub('%', '', dataline[4]), dataline[5]) - + if __name__ == "__main__": processor = checker() processor.get_data() diff --git a/plugins/bsd/netstat_bsd_m_-day.png b/plugins/bsd/example-graphs/netstat_bsd_m_-day.png similarity index 100% rename from plugins/bsd/netstat_bsd_m_-day.png rename to plugins/bsd/example-graphs/netstat_bsd_m_-day.png diff --git a/plugins/bsd/netstat_bsd_m_ b/plugins/bsd/netstat_bsd_m_ index 082c4831..2e359a23 100755 --- a/plugins/bsd/netstat_bsd_m_ +++ b/plugins/bsd/netstat_bsd_m_ @@ -1,48 +1,54 @@ #!/usr/bin/env ruby -# netstat_bsd_m revision 1 (Feb 2012) -# -# This plugin shows various statistics from 'netstat -m' -# -# Required privileges: none -# -# OS: -# Supposed: BSD -# Tested: FreeBSD 8.2 -# -# Author: Artem Sheremet -# +=begin + +netstat_bsd_m revision 1 (Feb 2012) + +This plugin shows various statistics from 'netstat -m' + +Required privileges: none + +OS: + Supposed: BSD + Tested: FreeBSD 8.2 + +Author: Artem Sheremet #%# family=auto #%# capabilities=autoconf suggest +=end + # original filename -PLUGIN_NAME = 'netstat_bsd_m_' +PLUGIN_NAME = 'netstat_bsd_m_'.freeze class String - def escape - self.gsub /[^\w]/, '_' - end + def escape + gsub(/[^\w]/, '_') + end - unless method_defined? :start_with? - def start_with?(str) - self[0...str.size] == str - end - end + unless method_defined? :start_with? + def start_with?(str) + self[0...str.size] == str + end + end end def netstat_m(filter = nil) - Hash[`netstat -m`.split($/).map { |line| - if line =~ /^([\d\/K]+) (.*) \(([\w\/+]+)\)$/ - # 7891K/22385K/30276K bytes allocated to network (current/cache/total) - values, desc, names = $1, $2, $3 - [desc, names.split('/').zip(values.split '/')] if filter.nil? or desc.escape == filter - elsif line =~ /^(\d+) (.*)$/ - # 12327 requests for I/O initiated by sendfile - value, desc = $1, $2 - [desc, [[ :value, value ]]] if filter.nil? or desc.escape == filter - end - }.compact] + Hash[`netstat -m`.split($/).map do |line| + if line =~ %r{^([\d/K]+) (.*) \(([\w/+]+)\)$} + # 7891K/22385K/30276K bytes allocated to network (current/cache/total) + values = Regexp.last_match(1) + desc = Regexp.last_match(2) + names = Regexp.last_match(3) + [desc, names.split('/').zip(values.split('/'))] if filter.nil? || (desc.escape == filter) + elsif line =~ /^(\d+) (.*)$/ + # 12327 requests for I/O initiated by sendfile + value = Regexp.last_match(1) + desc = Regexp.last_match(2) + [desc, [[:value, value]]] if filter.nil? || (desc.escape == filter) + end + end.compact] end stat_name = File.basename($0, '.*').escape @@ -50,47 +56,47 @@ stat_name.slice! 0, PLUGIN_NAME.size if stat_name.start_with? PLUGIN_NAME case ARGV.first when 'autoconf' - puts `uname -s`.include?('FreeBSD') ? 'yes' : 'no' + puts `uname -s`.include?('FreeBSD') ? 
'yes' : 'no' when 'suggest' - puts netstat_m.keys.map(&:escape).join $/ + puts netstat_m.keys.map(&:escape).join $/ when 'config' - data = netstat_m(stat_name) - if data.empty? - warn "no data for <#{stat_name}>. Try running with 'suggest'" - else - desc, values = data.first - stack = values.size > 1 - first = true - puts <. Try running with 'suggest'" + else + desc, values = data.first + stack = values.size > 1 + first = true + puts <<~CONFIG + graph_title Netstat: #{desc} + graph_category network + graph_vlabel current + graph_order #{values.map { |name, _| name.to_s.escape }.join ' '} + CONFIG + puts values.map { |name, _| + esc_name = name.to_s.escape + "#{esc_name}.draw " + if %w[total max].include? name + 'LINE' + elsif stack + if first + first = false + 'AREA' + else + 'STACK' + end + else + 'LINE2' + end + "\n#{esc_name}.label #{name}" + }.join $/ + end when nil # fetch - data = netstat_m(stat_name) - unless data.empty? - puts data.first.last.map { |name, value| - value = value.to_i * 1024 if value.end_with? 'K' - "#{name.to_s.escape}.value #{value}" - }.join $/ - end + data = netstat_m(stat_name) + unless data.empty? + puts data.first.last.map { |name, value| + value = value.to_i * 1024 if value.end_with? 'K' + "#{name.to_s.escape}.value #{value}" + }.join $/ + end else - warn "unrecognized argument <#{ARGV.first}>" + warn "unrecognized argument <#{ARGV.first}>" end diff --git a/plugins/bsd/spamd-blacklist-bsd b/plugins/bsd/spamd-blacklist-bsd index 257f980c..f95f344b 100755 --- a/plugins/bsd/spamd-blacklist-bsd +++ b/plugins/bsd/spamd-blacklist-bsd @@ -77,4 +77,4 @@ black.label Blacklisted""" processor = checker(blacklists) processor.process_lines(logfile) print processor - + diff --git a/plugins/bsd/spamd-tarpit-bsd b/plugins/bsd/spamd-tarpit-bsd index 6989aea5..d547b03c 100755 --- a/plugins/bsd/spamd-tarpit-bsd +++ b/plugins/bsd/spamd-tarpit-bsd @@ -68,4 +68,4 @@ tarpit.label Average tarpit delay""" processor = checker() processor.process_lines(logfile) print processor - + diff --git a/plugins/bsd/uptime_bsd b/plugins/bsd/uptime_bsd index 898a1a09..98f80792 100755 --- a/plugins/bsd/uptime_bsd +++ b/plugins/bsd/uptime_bsd @@ -48,7 +48,7 @@ use Date::Parse; my $kern=`sysctl -n kern.version`; $kern=~ /:\s+(.*\S)\s+\w+\@/; #print "Compile: $1\n"; -$kern=str2time($1); +$kern= $1 ? 
str2time($1) : undef; my $boot=`sysctl -n kern.boottime`; # OpenBSD will return seconds from the epoch if ($ostype ne "OpenBSD") { @@ -59,5 +59,5 @@ if ($ostype ne "OpenBSD") { my $now=time; -print "compile.value ",($now-$kern)/60/60/24,"\n"; +print "compile.value ",($now-$kern)/60/60/24,"\n" if $kern; print "uptime.value ",($now-$boot)/60/60/24,"\n"; diff --git a/plugins/celery/celery_tasks b/plugins/celery/celery_tasks index 6a505743..668e2524 100755 --- a/plugins/celery/celery_tasks +++ b/plugins/celery/celery_tasks @@ -79,7 +79,7 @@ def get_data(what, api_url, *args): except IOError: print 'Could not connect to the celerymon webserver' sys.exit(-1) - + def check_web_server_status(api_url): try: request = urllib.urlopen(api_url) @@ -97,32 +97,32 @@ def print_config(task_names): print 'graph_args --lower-limit 0' print 'graph_scale no' print 'graph_vlabel tasks per ${graph_period}' - print 'graph_category other' + print 'graph_category cloud' for name in task_names: print '%s.label %s' % (clean_task_name(name), name) print '%s.type DERIVE' % (clean_task_name(name)) print '%s.min 0' % (clean_task_name(name)) print '%s.info number of %s tasks' % (clean_task_name(name), name) - + # Values def print_values(task_names = None, api_url = None): for task_name in task_names: count = len(get_data('task_details', api_url, task_name)) print '%s.value %d' % (clean_task_name(task_name), count) - + if __name__ == '__main__': task_names = os.environ.get('tasks', None) api_url = os.environ.get('api_url', API_URL) - + check_web_server_status(api_url) - + if not task_names: print 'You need to define at least one task name' sys.exit(-1) - + task_names = task_names.split(',') - + if len(sys.argv) > 1: if sys.argv[1] == 'config': print_config(task_names) diff --git a/plugins/celery/celery_tasks_states b/plugins/celery/celery_tasks_states index 1663d76a..cf978076 100755 --- a/plugins/celery/celery_tasks_states +++ b/plugins/celery/celery_tasks_states @@ -82,7 +82,7 @@ def get_data(what, api_url, *args): except IOError: print 'Could not connect to the celerymon webserver' sys.exit(-1) - + def check_web_server_status(api_url): try: request = urllib.urlopen(api_url) @@ -103,7 +103,7 @@ def print_config(workers = None): print 'graph_args --lower-limit 0' print 'graph_scale no' print 'graph_vlabel tasks per ${graph_period}' - print 'graph_category other' + print 'graph_category cloud' for name in TASK_STATES: name = clean_state_name(name) @@ -115,33 +115,33 @@ def print_config(workers = None): # Values def print_values(workers = None, api_url = None): data = get_data('tasks', api_url) - + counters = dict([(key, 0) for key in TASK_STATES]) for task_name, task_data in data: state = task_data['state'] hostname = task_data['worker']['hostname'] - + if workers and hostname not in workers: continue - + counters[state] += 1 - + for name in TASK_STATES: name_cleaned = clean_state_name(name) value = counters[name] print '%s.value %d' % (name_cleaned, value) - + if __name__ == '__main__': workers = os.environ.get('workers', 'all') api_url = os.environ.get('api_url', API_URL) - + check_web_server_status(api_url) - + if workers in [None, '', 'all']: workers = None else: workers = workers.split(',') - + if len(sys.argv) > 1: if sys.argv[1] == 'config': print_config(workers) @@ -149,4 +149,4 @@ if __name__ == '__main__': print 'yes' else: print_values(workers, api_url) - + diff --git a/plugins/ceph/ceph-osd-info b/plugins/ceph/ceph-osd-info index d9c7c6eb..d4dd4e8c 100755 --- a/plugins/ceph/ceph-osd-info +++ 
b/plugins/ceph/ceph-osd-info @@ -31,7 +31,7 @@ Jort Bloem This is one of the first programs I wrote in Python. I got carried away by Python's powerful one-line commands. Just because you can, doesn't -mean you should. +mean you should. This program needs a rewrite, and if there were any problems with it, I probably would. @@ -102,7 +102,7 @@ def read_osd(filename): return json.loads(result) except: pass - + return None def osd_list(): @@ -148,7 +148,7 @@ if (sys.argv.__len__()>1) and (sys.argv[1]=="config"): gr_simple=graph.replace("-","_").replace(":","_") gr_pretty=graph.replace("_"," ").title() gr=graph.replace("-","_").replace(":","_") - graphdefaults={"graph_title":gr_pretty,"graph_vlabel":gr_pretty,"graph_category":"osd"} + graphdefaults={"graph_title":gr_pretty,"graph_vlabel":gr_pretty,"graph_category":"fs"} graphsettings=dict(graphdefaults.items()+graphsettings.items()) print "multigraph %s" % (gr_simple) print "\n".join(["%s %s" % setting for setting in graphsettings.items()]) @@ -182,7 +182,3 @@ else: for osd in osds: print "multigraph %s.osd%s" % (gr,osd) print "osd%s_%s.value %s" % (osd,gr,data[osd][graph]) - -# for Munin Plugin Gallery -# graph_category fs - diff --git a/plugins/ceph/ceph_capacity b/plugins/ceph/ceph_capacity index 42a9685c..05e45b38 100755 --- a/plugins/ceph/ceph_capacity +++ b/plugins/ceph/ceph_capacity @@ -36,7 +36,7 @@ WARNING_LEVEL=${warning_level:-"80"} CRITICAL_LEVEL=${critical_level:-"90"} if [ "$1" = "config" ]; then - + echo 'graph_title CEPH capacity' echo 'graph_category fs' echo 'graph_vlabel GB' diff --git a/plugins/ceph/ceph_osd b/plugins/ceph/ceph_osd index 9d208b53..4e5404de 100755 --- a/plugins/ceph/ceph_osd +++ b/plugins/ceph/ceph_osd @@ -6,6 +6,10 @@ ceph_osd - Shows ceph OSD states (total configured, up and in) +=head1 APPLICABLE SYSTEMS + +Any host with ceph configured and "jq" being installed . + =head1 AUTHOR Mate Gabri @@ -28,7 +32,7 @@ fi if [ "$1" = "config" ]; then - + echo 'graph_title CEPH OSDs' echo 'graph_category fs' echo 'graph_vlabel nr' @@ -44,6 +48,8 @@ if [ "$1" = "config" ]; then exit 0 fi -echo "osds.value $(ceph -s | grep osdmap | awk '{ print $3 }')" -echo "up.value $(ceph -s | grep osdmap | awk '{ print $5 }')" -echo "in.value $(ceph -s | grep osdmap | awk '{ print $7 }')" +CEPH_STATUS=$(ceph -s --format=json) + +echo "osds.value $(echo "$CEPH_STATUS" | jq '.osdmap.osdmap.num_osds')" +echo "up.value $(echo "$CEPH_STATUS" | jq '.osdmap.osdmap.num_up_osds')" +echo "in.value $(echo "$CEPH_STATUS" | jq '.osdmap.osdmap.num_in_osds')" diff --git a/plugins/change.org/changeorg_signature_count b/plugins/change.org/changeorg_signature_count index b9e7ab8a..775c8af7 100755 --- a/plugins/change.org/changeorg_signature_count +++ b/plugins/change.org/changeorg_signature_count @@ -65,7 +65,7 @@ for i in environ.get('petitions').split(): continue # NB: user-agent's tweak is needed to avoid a 403 req = urllib.request.Request("https://api.change.org/v1/petitions/{}?api_key={}&fields=title".format(i, environ.get('APIkey')), - data=None, + data=None, headers={ 'User-Agent': 'curl/7.38.0' }) response = urllib.request.urlopen(req).read().decode('utf-8') petition_titles[i] = json.loads(response) diff --git a/plugins/chat/tinychat_users_ b/plugins/chat/tinychat_users_ index 39a40679..4e0a1fbb 100755 --- a/plugins/chat/tinychat_users_ +++ b/plugins/chat/tinychat_users_ @@ -4,7 +4,7 @@ ######################### # Munin Plugin to track monitor the number of users a tinychat room gets. 
# Author Phil Wray ( http://www.infjs.com ) -# +# # # # Usage: @@ -19,7 +19,7 @@ room=${0##*tinychat_users_} ## if [ "$1" = "autoconf" ]; then # Check that curl is installed - if hash curl >/dev/null 2>&1; then + if command -v curl >/dev/null 2>&1; then echo "yes" else echo "no (no curl installed)" @@ -36,11 +36,11 @@ echo "graph_title Tinychat Users for $room" echo 'graph_args --base 1000' echo 'graph_scale no' echo 'graph_vlabel Room Users' - echo 'graph_category Chat' + echo 'graph_category chat' echo 'total_count.label Room Users' echo 'total_count.draw AREA' echo 'broadcaster_count.draw STACK' - echo 'broadcaster_count.label Broadcasting' + echo 'broadcaster_count.label Broadcasting' exit 0 fi diff --git a/plugins/cherokee/munin-plugin-for-cherokee b/plugins/cherokee/munin-plugin-for-cherokee index 17e878bf..157ff744 100755 --- a/plugins/cherokee/munin-plugin-for-cherokee +++ b/plugins/cherokee/munin-plugin-for-cherokee @@ -82,7 +82,7 @@ def get_data(): base64string = base64.standard_b64encode('%s:%s' % (http_user, http_pass)) if len(http_user) > 0 and len(http_pass) > 0: #print "DEBUG: AUTH: " + base64string - request.add_header("Authorization", "Basic %s" % base64string) + request.add_header("Authorization", "Basic %s" % base64string) #print "DEBUG: GET: " + url raw_data = urllib2.urlopen(request).read() #print "DEBUG: " + raw_data @@ -129,7 +129,7 @@ def munin_config(response): for key in section: print key + '.label ' + key.capitalize() print key + '.draw AREASTACK' - + if len(sys.argv) > 1 and sys.argv[1] == "config": munin_config(get_data()) else: diff --git a/plugins/chilli/chilli_sessions_ b/plugins/chilli/chilli_sessions_ index 1e73f349..6c3dd9b0 100755 --- a/plugins/chilli/chilli_sessions_ +++ b/plugins/chilli/chilli_sessions_ @@ -80,7 +80,7 @@ case "$1" in done echo "total" - + exit 0 ;; config) diff --git a/plugins/chrony/chrony b/plugins/chrony/chrony index 314860f6..0554e91f 100755 --- a/plugins/chrony/chrony +++ b/plugins/chrony/chrony @@ -34,7 +34,7 @@ Revision 0.3 2014/02/16 zjttoefs reduce forking by using awk do not limit output precision add stratum monitoring - detect slow/fast time or freqency and adjust sign of value accordingly + detect slow/fast time or frequency and adjust sign of value accordingly remove commented out code Revision 0.4 2016/11/10 Lars Kruse diff --git a/plugins/chrony/chrony_drift b/plugins/chrony/chrony_drift new file mode 100755 index 00000000..ddaf4aab --- /dev/null +++ b/plugins/chrony/chrony_drift @@ -0,0 +1,88 @@ +#! /bin/sh +# -*- sh -*- + +: <<=cut + +=head1 NAME + +chrony_drift - Munin plugin to monitor the Chrony drift value. + +=head1 APPLICABLE SYSTEMS + +Any chronyd host. + +=head1 CONFIGURATION + +The following configuration parameters are used by this plugin: + + [chrony_drift] + env.driftfile - Path to driftfile. + +=head2 DEFAULT CONFIGURATION + + [chrony_drift] + env.driftfile /var/lib/chrony/chrony.drift + +=head1 USAGE + +Link this plugin to /etc/munin/plugins/ and restart the munin-node. + +=head1 AUTHOR + +HORINOUCHI Masato 2019-07-16 + +=head1 LICENSE + +Same as munin. 
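
Since the driftfile is just a small text file, the plugin body below boils down to splitting it into its two whitespace-separated columns: the gain/loss rate in ppm and chrony's estimate of the error bound. A rough equivalent, assuming the same default path:

#!/usr/bin/env python3
"""Illustration only: read the chrony driftfile the same way the plugin below
does with awk.  Default path as in the plugin's configuration."""
import os

driftfile = os.environ.get("driftfile", "/var/lib/chrony/chrony.drift")

try:
    with open(driftfile) as handle:
        fields = handle.read().split()
    print("drift.value %s" % fields[0])          # gain/loss rate, ppm
    print("error_bound.value %s" % fields[1])    # estimated error bound
except (OSError, IndexError):
    # mirror the plugin's fallback when the file is unreadable
    print("drift.value U")
    print("error_bound.value U")
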
+ +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf + +=cut + +driftfile=${driftfile:-'/var/lib/chrony/chrony.drift'} + +do_autoconf () { + if [ -r "$driftfile" ]; then + echo "yes" + else + echo "no (could not read driftfile '$driftfile'.)" + fi +} + + +do_config () { + cat <<'EOM' +graph_title Chrony Drift +graph_args --base 1000 +graph_vlabel Parts Per Million +graph_category time +drift.label System Clock Gain/Loss Rate +error_bound.label Estimate of Error Bound +graph_info The rate at which the system clock gains or loses time relative to real time. +EOM +} + + +do_ () { + if [ -r "$driftfile" ]; then + echo "drift.value $(awk '{print $1;}' "$driftfile")" + echo "error_bound.value $(awk '{print $2;}' "$driftfile")" + else + echo "drift.value U" + echo "error_bound.value U" + fi +} + + +case $1 in + autoconf|config|'') + do_"$1" + ;; + *) + echo "Don't know how to do that" >&2 + exit 1 + ;; +esac diff --git a/plugins/clamav/clamav b/plugins/clamav/clamav index 042a3d4e..16f9979c 100755 --- a/plugins/clamav/clamav +++ b/plugins/clamav/clamav @@ -4,7 +4,7 @@ log=/var/log/clamav/clamd.log if [ "$1" = "autoconf" ]; then - if [ -r $log ]; then + if [ -r $log ]; then echo "yes" else echo "no (cannot read $log)" @@ -16,7 +16,7 @@ if [ "$1" = "config" ]; then cat < /dev/null | grep "Total sessions: " | sed 's/ Total sessions: //') + +echo "udp_sessions.value $(pick_line "$SESSIONS" 1)" +echo "tcp_sessions.value $(pick_line "$SESSIONS" 2)" +echo "dtls_sessions.value $(pick_line "$SESSIONS" 3)" +echo "tls_sessions.value $(pick_line "$SESSIONS" 4)" + diff --git a/plugins/courier/courier_log b/plugins/courier/courier_log index f1796d4e..fc1901c2 100755 --- a/plugins/courier/courier_log +++ b/plugins/courier/courier_log @@ -47,9 +47,9 @@ type=$(basename "$0" | tr "_" "\n" | tail -1) if [ "$1" = "config" ]; then cat < /dev/null || rm -f $pidfile } @@ -34,7 +33,7 @@ run_daemon() { run_watchdog if [ -f $pidfile ]; then touch $pidfile - else + else $pluginfull acquire & fi } @@ -52,12 +51,12 @@ run_config() { run_daemon cat << EOF graph_title $graph -graph_category $section +graph_category 1sec graph_vlabel average cpu use % graph_scale no -graph_total All CPUs +graph_total All CPUs update_rate 1 -graph_data_size custom 1d, 10s for 1w, 1m for 1t, 5m for 1y +graph_data_size custom 1d, 10s for 1w, 1m for 1t, 5m for 1y EOF cpun=0 for ((i=0; i<$cpus; i++)); do @@ -87,6 +86,3 @@ exit 0 # acquire which needs a different pid than watchdog, otherwise watchdog # could/will kill itself when expiring before the watched process is killed. # not a POSIX feature. - -# for Munin Plugin Gallery -# graph_category 1sec diff --git a/plugins/cpu/multicpu1sec-c.c b/plugins/cpu/multicpu1sec-c.c index ab8a8890..497d1a28 100644 --- a/plugins/cpu/multicpu1sec-c.c +++ b/plugins/cpu/multicpu1sec-c.c @@ -95,7 +95,11 @@ int acquire() { /* fork ourselves if not asked otherwise */ char* no_fork = getenv("no_fork"); if (! 
no_fork || strcmp("1", no_fork)) { - if (fork()) return; + pid_t child_pid = fork(); + if (child_pid) { + printf("# acquire() launched as PID %d\n", child_pid); + return 0; + } // we are the child, complete the daemonization /* Close standard IO */ @@ -116,7 +120,12 @@ int acquire() { int f = open(PROC_STAT, O_RDONLY); /* open the spoolfile */ - int cache_file = open(cache_filename, O_CREAT | O_APPEND | O_WRONLY); + FILE* cache_file = fopen(cache_filename, "a"); + if (!cache_file) { + return fail("cannot create cache_file"); + } + + int cache_file_fd = fileno(cache_file); /* loop each second */ while (1) { @@ -142,7 +151,7 @@ int acquire() { line = strtok(buffer, newl); /* lock */ - flock(cache_file, LOCK_EX); + flock(cache_file_fd, LOCK_EX); for (line = strtok(NULL, newl); line; line = strtok(NULL, newl)) { // Not on CPU lines anymore @@ -154,17 +163,15 @@ int acquire() { long used = usr + nice + sys + iowait + irq + softirq; - char out_buffer[1024]; - sprintf(out_buffer, "%s.value %ld:%ld\n", cpu_id, epoch, used); - - write(cache_file, out_buffer, strlen(out_buffer)); + fprintf(cache_file, "%s.value %ld:%ld\n", cpu_id, epoch, used); + fflush(cache_file); } /* unlock */ - flock(cache_file, LOCK_UN); + flock(cache_file_fd, LOCK_UN); } - close(cache_file); + fclose(cache_file); close(f); return 0; @@ -172,9 +179,13 @@ int acquire() { int fetch() { FILE* cache_file = fopen(cache_filename, "r+"); + if (!cache_file) { + return acquire(); + } /* lock */ - flock(fileno(cache_file), LOCK_EX); + int cache_file_fd = fileno(cache_file); + flock(cache_file_fd, LOCK_EX); /* cat the cache_file to stdout */ char buffer[1024]; @@ -182,7 +193,7 @@ int fetch() { printf("%s", buffer); } - ftruncate(fileno(cache_file), 0); + ftruncate(cache_file_fd, 0); fclose(cache_file); return 0; diff --git a/plugins/cpu/process_count b/plugins/cpu/process_count index 0802c8cd..166a02dd 100755 --- a/plugins/cpu/process_count +++ b/plugins/cpu/process_count @@ -26,7 +26,7 @@ -if [ "$1" = "autoconf" ] ; then +if [ "$1" = "autoconf" ] ; then if [ -n "$procs" ] ; then echo "yes" else @@ -49,23 +49,23 @@ if [ "$1" = "config" ] ; then echo 'graph_scale no' echo 'graph_period second' echo "graph_order $cprocs" - - for proc in $procs; do + + for proc in $procs; do cproc=${proc//[^A-Za-z0-9_]/_} echo "${cproc}.label $proc" echo "${cproc}.info CPU used by process $proc" done ; - + exit fi for proc in $procs ; do { cproc=${proc//[^A-Za-z0-9_]/_} ps axo '%mem,comm,command' | grep -v grep | grep "$proc" | LC_ALL=us_US awk ' - BEGIN { - SUM=0 + BEGIN { + SUM=0 } - { + { SUM+=1 COMM=$2 } diff --git a/plugins/cpu/process_cpushare b/plugins/cpu/process_cpushare index d6ee3dc2..029ac9d0 100755 --- a/plugins/cpu/process_cpushare +++ b/plugins/cpu/process_cpushare @@ -26,7 +26,7 @@ -if [ "$1" = "autoconf" ] ; then +if [ "$1" = "autoconf" ] ; then if [ -n "$procs" ] ; then echo "yes" else @@ -49,13 +49,13 @@ if [ "$1" = "config" ] ; then echo 'graph_scale no' echo 'graph_period second' echo "graph_order $cprocs" - - for proc in $procs; do + + for proc in $procs; do cproc=${proc//[^A-Za-z0-9_]/_} echo "${cproc}.label $proc" echo "${cproc}.info CPU used by process $proc" done ; - + exit fi @@ -64,14 +64,14 @@ fi for proc in $procs ; do { cproc=${proc//[^A-Za-z0-9_]/_} ps axo 'pcpu,comm,command' | grep -v grep | grep "$proc" | LC_ALL=us_US awk ' - BEGIN { - SUM=0 + BEGIN { + SUM=0 } - { - SUM+=$1 + { + SUM+=$1 } - END { - print "'${cproc}'.value "SUM + END { + print "'${cproc}'.value "SUM } ' } diff --git a/plugins/currency/bitcoin/bitcoind_ 
b/plugins/currency/bitcoin/bitcoind_ index bcb50aa0..4bdf75e0 100755 --- a/plugins/currency/bitcoin/bitcoind_ +++ b/plugins/currency/bitcoin/bitcoind_ @@ -1,49 +1,105 @@ -#!/usr/bin/env python -# bitcoind_ Munin plugin for Bitcoin Server Variables -# -# by Mike Koss -# Feb 14, 2012, MIT License -# -# You need to be able to authenticate to the bitcoind server to issue rpc's. -# This plugin supporst 2 ways to do that: -# -# 1) In /etc/munin/plugin-conf.d/bitcoin.conf place: -# -# [bitcoind_*] -# user your-username -# -# Then be sure your $HOME/.bitcoin/bitcoin.conf has the correct authentication info: -# rpcconnect, rpcport, rpcuser, rpcpassword -# -# 2) Place your bitcoind authentication directly in /etc/munin/plugin-conf.d/bitcoin.conf -# -# [bitcoind_*] -# env.rpcport 8332 -# env.rpcconnect 127.0.0.1 -# env.rpcuser your-username-here -# env.rpcpassword your-password-here -# -# To install all available graphs: -# -# sudo munin-node-configure --libdir=. --suggest --shell | sudo bash -# -# Leave out the "| bash" to get a list of commands you can select from to install -# individual graphs. -# -# Munin plugin tags: -# -#%# family=auto -#%# capabilities=autoconf suggest +#!/usr/bin/env python3 +"""=cut +=head1 NAME + + bitcoind_ - Track Bitcoin Server Variables + +=head1 CONFIGURATION + +You need to be able to authenticate to the bitcoind server to issue rpc's. +This plugin supports two ways to do that: + +1) In /etc/munin/plugin-conf.d/bitcoin.conf place: + + [bitcoind_*] + user your-username + env.bitcoin_configfile /home/your-username/.bitcoin/bitcoin.conf + + Then be sure that the file referenced above (typically: $HOME/.bitcoin/bitcoin.conf) + has the correct authentication info: + rpcconnect, rpcport, rpcuser, rpcpassword + +2) Place your bitcoind authentication directly in /etc/munin/plugin-conf.d/bitcoin.conf + + [bitcoind_*] + env.rpcport 8332 + env.rpcconnect 127.0.0.1 + env.rpcuser your-username-here + env.rpcpassword your-password-here + +To install all available graphs: + + sudo munin-node-configure --libdir=. --suggest --shell | sudo bash + +Leave out the "| bash" to get a list of commands you can select from to install +individual graphs. + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=head1 LICENSE + +MIT License + +=head1 AUTHOR + +Copyright (C) 2012 Mike Koss + +=cut""" + +import base64 +import json import os +import re import sys import time -import re -import urllib2 -import json +import urllib.error +import urllib.request -DEBUG = False +DEBUG = os.getenv('MUNIN_DEBUG') == '1' + + +def _get_version(info): + # v0.15.2 version is represented as 150200 + return info['version'] // 10000 + + +def _rpc_get_initial_info(connection): + (info, connect_error) = connection.getnetworkinfo() + if connect_error: + if isinstance(connect_error, urllib.error.HTTPError) and connect_error.code == 404: + # getinfo RPC exists in version <= 0.15 + (info, connect_error) = connection.getinfo() + if connect_error: + return (None, None, connect_error) + else: + return (None, None, connect_error) # pass all other not-404 errors + + return (info, _get_version(info), None) + + +def _rpc_get_balance(info, minor_version, connection): + # see https://github.com/bitcoin/bitcoin/blob/239d199667888e5d60309f15a38eed4d3afe56c4/ + # doc/release-notes/release-notes-0.19.0.1.md#new-rpcs + if minor_version >= 19: + # we use getbalance*s* (plural) as old getbalance is being deprecated, + # and we have to calculate total balance (owned and watch-only) manually now. 
+ (result, error) = connection.getbalances()
+
+ total = sum(result[wallet_mode]['trusted']
+ for wallet_mode in ('mine', 'watchonly')
+ if wallet_mode in result)
+
+ info['balance'] = total
+ return info
+ else:
+ (result, error) = connection.getbalance()
+ info['balance'] = result
+ return info
def main():
@@ -52,7 +108,6 @@ def main():
command = sys.argv[1] if len(sys.argv) > 1 else None
request_labels = {'balance': ('Wallet Balance', 'BTC'),
'connections': ('Peer Connections', 'Connections'),
- 'fees': ("Tip Offered", "BTC"),
'transactions': ("Transactions", "Transactions",
('confirmed', 'waiting')),
'block_age': ("Last Block Age", "Seconds"),
@@ -66,60 +121,78 @@ def main():
if command == 'suggest':
for var_name in request_labels.keys():
- print var_name
- return
+ print(var_name)
+ return True
if command == 'config':
- print 'graph_category other'
- print 'graph_title Bitcoin %s' % labels[0]
- print 'graph_vlabel %s' % labels[1]
+ print('graph_category htc')
+ print('graph_title Bitcoin %s' % labels[0])
+ print('graph_vlabel %s' % labels[1])
for label in line_labels:
- print '%s.label %s' % (label, label)
- return
+ print('%s.label %s' % (label, label))
+ return True
# Munin should send connection options via environment vars
bitcoin_options = get_env_options('rpcconnect', 'rpcport', 'rpcuser', 'rpcpassword')
bitcoin_options.rpcconnect = bitcoin_options.get('rpcconnect', '127.0.0.1')
bitcoin_options.rpcport = bitcoin_options.get('rpcport', '8332')
+ error = None
if bitcoin_options.get('rpcuser') is None:
- conf_file = os.path.join(os.path.expanduser('~/.bitcoin'), 'bitcoin.conf')
- bitcoin_options = parse_conf(conf_file)
+ conf_file = os.getenv("bitcoin_configfile")
+ if not conf_file:
+ error = "Missing environment settings: rpcuser/rpcpassword or bitcoin_configfile"
+ elif not os.path.exists(conf_file):
+ error = "Configuration file does not exist: {}".format(conf_file)
+ else:
+ bitcoin_options = parse_conf(conf_file)
- bitcoin_options.require('rpcuser', 'rpcpassword')
+ if not error:
+ try:
+ bitcoin_options.require('rpcuser', 'rpcpassword')
+ except KeyError as exc:
+ error = str(exc).strip("'")
- bitcoin = ServiceProxy('http://%s:%s' % (bitcoin_options.rpcconnect,
- bitcoin_options.rpcport),
- username=bitcoin_options.rpcuser,
- password=bitcoin_options.rpcpassword)
+ if not error:
+ bitcoin = ServiceProxy('http://%s:%s' % (bitcoin_options.rpcconnect,
+ bitcoin_options.rpcport),
+ username=bitcoin_options.rpcuser,
+ password=bitcoin_options.rpcpassword)
+ (info, minor_version, connect_error) = _rpc_get_initial_info(bitcoin)
+ if connect_error:
+ error = "Could not connect to Bitcoin server: {}".format(connect_error)
- (info, error) = bitcoin.getinfo()
+ if command == 'autoconf':
+ if error:
+ print('no ({})'.format(error))
+ else:
+ print('yes')
+ return True
if error:
- if command == 'autoconf':
- print 'no'
- return
- else:
- # TODO: Better way to report errors to Munin-node.
- raise ValueError("Could not connect to Bitcoin server.") + print(error, file=sys.stderr) + return False + + if request_var == 'balance': + info = _rpc_get_balance(info, minor_version, bitcoin) if request_var in ('transactions', 'block_age'): - (info, error) = bitcoin.getblockhash(info['blocks']) - (info, error) = bitcoin.getblock(info) + (info, error) = bitcoin.getblockchaininfo() + (info, error) = bitcoin.getblock(info['bestblockhash']) info['block_age'] = int(time.time()) - info['time'] info['confirmed'] = len(info['tx']) - if request_var in ('fees', 'transactions'): - (memory_pool, error) = bitcoin.getrawmempool() - if memory_pool: - info['waiting'] = len(memory_pool) + if request_var == 'difficulty': + (info, error) = bitcoin.getblockchaininfo() - if command == 'autoconf': - print 'yes' - return + if request_var == 'transactions': + (memory_pool, error) = bitcoin.getmempoolinfo() + info['waiting'] = memory_pool['size'] for label in line_labels: - print "%s.value %s" % (label, info[label]) + print("%s.value %s" % (label, info[label])) + + return True def parse_conf(filename): @@ -138,7 +211,8 @@ def parse_conf(filename): continue (var, value) = (m.group(1), m.group(2).strip()) options[var] = value - except: + except OSError: + # the config file may be missing pass return options @@ -147,7 +221,9 @@ def parse_conf(filename): def get_env_options(*vars): options = Options() for var in vars: - options[var] = os.getenv(var) + value = os.getenv(var) + if value is not None: + options[var] = os.getenv(var) return options @@ -165,12 +241,11 @@ class Options(dict): if self.get(name) is None: missing.append(name) if len(missing) > 0: - raise ValueError("Missing required setting%s: %s." % - ('s' if len(missing) > 1 else '', - ', '.join(missing))) + raise KeyError("Missing required setting{}: {}." + .format('s' if len(missing) > 1 else '', ', '.join(missing))) -class ServiceProxy(object): +class ServiceProxy: """ Proxy for a JSON-RPC web service. Calls to a function attribute generates a JSON-RPC call to the host service. If a callback @@ -190,7 +265,7 @@ class ServiceProxy(object): return Proxy(self, method, id=self.id) -class Proxy(object): +class Proxy: def __init__(self, service, method, id=None): self.service = service self.method = method @@ -199,43 +274,43 @@ class Proxy(object): def __call__(self, *args): if DEBUG: arg_strings = [json.dumps(arg) for arg in args] - print "Calling %s(%s) @ %s" % (self.method, + print("Calling %s(%s) @ %s" % (self.method, ', '.join(arg_strings), - self.service.url) + self.service.url)) data = { 'method': self.method, 'params': args, 'id': self.id, - } - request = urllib2.Request(self.service.url, json.dumps(data)) + } + request = urllib.request.Request(self.service.url, json.dumps(data).encode()) if self.service.username: - # Strip the newline from the b64 encoding! 
- b64 = ('%s:%s' % (self.service.username, self.service.password)).encode('base64')[:-1] - request.add_header('Authorization', 'Basic %s' % b64) + auth_string = '%s:%s' % (self.service.username, self.service.password) + auth_b64 = base64.urlsafe_b64encode(auth_string.encode()).decode() + request.add_header('Authorization', 'Basic %s' % auth_b64) try: - body = urllib2.urlopen(request).read() - except Exception, e: + body = urllib.request.urlopen(request).read() + except urllib.error.URLError as e: return (None, e) if DEBUG: - print 'RPC Response (%s): %s' % (self.method, json.dumps(body, indent=4)) + print('RPC Response (%s): %s' % (self.method, json.dumps(json.loads(body), indent=4))) try: data = json.loads(body) - except ValueError, e: + except ValueError as e: return (None, e.message) # TODO: Check that id matches? return (data['result'], data['error']) def get_json_url(url): - request = urllib2.Request(url) - body = urllib2.urlopen(request).read() + request = urllib.request.Request(url) + body = urllib.request.urlopen(request).read() data = json.loads(body) return data if __name__ == "__main__": - main() + sys.exit(0 if main() else 1) diff --git a/plugins/currency/bitcoin/btcguild_hashrate_ b/plugins/currency/bitcoin/btcguild_hashrate_ index 33d72d7e..c60db5a3 100755 --- a/plugins/currency/bitcoin/btcguild_hashrate_ +++ b/plugins/currency/bitcoin/btcguild_hashrate_ @@ -7,35 +7,34 @@ import urllib2 import json URL = 'https://www.btcguild.com/api.php?api_key=' -API_KEY = sys.argv[0][(sys.argv[0].rfind('_')+1):] +API_KEY = sys.argv[0][(sys.argv[0].rfind('_') + 1):] STATS = URL + API_KEY -print STATS +print(STATS) command = '' if len(sys.argv) > 1: command = sys.argv[1] -header = {'User-Agent':'Mozilla/5.0'} -request = urllib2.Request(STATS,headers=header) +header = {'User-Agent': 'Mozilla/5.0'} +request = urllib2.Request(STATS, headers=header) mining_stats_raw = urllib2.urlopen(request) mining_stats = json.load(mining_stats_raw) workers = mining_stats['workers'] if command == 'config': - print "graph_title BTCGuild Hashrate" - print "graph_args --upper-limit 3000 -l 0" - print "graph_vlabel MHash/s" - print "graph_category other" + print("graph_title BTCGuild Hashrate") + print("graph_args --upper-limit 3000 -l 0") + print("graph_vlabel MHash/s") + print("graph_category htc") for worker in workers: label = workers[worker]['worker_name'] - print label + ".label " + label + print(label + ".label " + label) sys.exit(0) for worker in workers: - hash_rate = workers[worker]['hash_rate'] - label = workers[worker]['worker_name'] - print label + ".value %d" % int(hash_rate) - + hash_rate = workers[worker]['hash_rate'] + label = workers[worker]['worker_name'] + print(label + ".value %d" % int(hash_rate)) diff --git a/plugins/currency/bitcoin/slush_hashrate_ b/plugins/currency/bitcoin/slush_hashrate_ index 59533b19..b07493fc 100755 --- a/plugins/currency/bitcoin/slush_hashrate_ +++ b/plugins/currency/bitcoin/slush_hashrate_ @@ -25,7 +25,7 @@ if command == 'config': print "graph_title Slush Hashrate" print "graph_args --upper-limit 3000 -l 0" print "graph_vlabel MHash/s" - print "graph_category other" + print "graph_category htc" for worker in workers: label = worker.replace(".","_") print label + ".label " +label diff --git a/plugins/currency/bitcoin/slush_reward_ b/plugins/currency/bitcoin/slush_reward_ index 28629f97..2986ea00 100755 --- a/plugins/currency/bitcoin/slush_reward_ +++ b/plugins/currency/bitcoin/slush_reward_ @@ -24,7 +24,7 @@ if command == 'config': print "graph_title Slush Rewards" 
print "graph_args -l 0" print "graph_vlabel BTC" - print "graph_category other" + print "graph_category htc" print "unconfirmed_reward.label Unconfirmed Reward" print "estimated_reward.label Estimeated Reward" print "confirmed_reward.label Confirmed Reward" diff --git a/plugins/currency/ethereum/ethermine_hashrate_ b/plugins/currency/ethereum/ethermine_hashrate_ index ca0bc038..550743b8 100755 --- a/plugins/currency/ethereum/ethermine_hashrate_ +++ b/plugins/currency/ethereum/ethermine_hashrate_ @@ -20,8 +20,8 @@ ethermine_hashrate__ =head1 INTERPRETATION -This plugin shows the ethermine.org mining pool hashrate (MH/s) of a given ethereum address and rig name. -Hashrate is queried via ethermine.org API L. +This plugin shows the ethermine.org mining pool hashrate (MH/s) of a given ethereum address and rig +name. Hashrate is queried via ethermine.org API L. =head1 VERSION @@ -72,7 +72,7 @@ if command == 'config': print("graph_title Ethermine {}".format(miner)) print("graph_info ethermine.org Mining Pool Hashrate for {}_{}".format(eth_address, miner)) print("graph_vlabel Ethermine Hashrate") - print("graph_category other") + print("graph_category htc") print("ethermine_mhs_{}_{}.warning 20:".format(eth_address, miner)) print("ethermine_mhs_{}_{}.critical 10:".format(eth_address, miner)) print("ethermine_mhs_{}_{}.label MH/s:".format(eth_address, miner)) @@ -102,13 +102,14 @@ except ValueError: try: workers = mining_stats['workers'] -except: +except KeyError: print("JSON result error!", file=sys.stderr) sys.exit(9) -# ethermine.org sometimes has caching errors. You can see data from other miner. Always check your rig name. +# ethermine.org sometimes has caching errors. You can see data from other miner. Always check your +# rig name. for worker in workers: if workers[worker]['worker'] == miner: hash_rate = workers[worker]['hashrate'] hash_rate = hash_rate.replace(" MH/s", "") - print("ethermine_mhs_{}_{}.value {}".format(eth_address, miner, hash_rate)) \ No newline at end of file + print("ethermine_mhs_{}_{}.value {}".format(eth_address, miner, hash_rate)) diff --git a/plugins/currency/ethereum/etherscan_balance_ b/plugins/currency/ethereum/etherscan_balance_ index 2445b7f6..e3bc3d2b 100755 --- a/plugins/currency/ethereum/etherscan_balance_ +++ b/plugins/currency/ethereum/etherscan_balance_ @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ =head1 NAME @@ -81,12 +81,13 @@ if command == 'config': print("graph_title Ether {}".format(eth_address)) print("graph_info Ethereum Account Balance for Address {}".format(eth_address)) print("graph_vlabel Ethereum Balance") - print("graph_category other") + print("graph_category htc") print("wei_balance_{0}.cdef wei_balance_{0},1000000000000000000,/".format(eth_address)) print("wei_balance_{}.label ETH".format(eth_address)) sys.exit(0) -ethercan_balance_api_url = 'https://api.etherscan.io/api?module=account&action=balance&tag=latest&address=' + eth_address +ethercan_balance_api_url = ('https://api.etherscan.io/api' + '?module=account&action=balance&tag=latest&address=' + eth_address) etherscan_req = Request(ethercan_balance_api_url) # User-Agent to bypass Cloudflare @@ -109,8 +110,8 @@ except ValueError: try: eth = int(etherscan_balance['result']) -except: +except (KeyError, ValueError): print("JSON result error!", file=sys.stderr) sys.exit(9) -print("wei_balance_{}.value {}".format(eth_address, eth)); +print("wei_balance_{}.value {}".format(eth_address, eth)) diff --git a/plugins/currency/nanopool/nanopool_ 
b/plugins/currency/nanopool/nanopool_ index 08bfa036..7338734a 100755 --- a/plugins/currency/nanopool/nanopool_ +++ b/plugins/currency/nanopool/nanopool_ @@ -75,7 +75,7 @@ except: from urllib2 import Request from urllib2 import urlopen from urllib2 import URLError - + def define_graph_types(): @@ -131,7 +131,7 @@ def request_data(): print("API request error: {0}". format(err), file=sys.stderr) exit(1) except: - print("Unhandled error:", sys.exc_info()[0]) + print("Unhandled error:", sys.exc_info()[0]) exit(1) try: result = json.loads(txt.decode("utf-8")) @@ -148,7 +148,7 @@ def write_config_worker(): print("graph_title Hashrate in Mh/s per worker ({0})".format(account_address)) print("graph_args --base 1000 -l 0") print("graph_vlabel Mh/s") - print("graph_category other") + print("graph_category htc") print("graph_scale no") for val in worker_data: @@ -158,7 +158,7 @@ def write_config_worker(): print("worker_{0}_hashrate.info Hashrate of worker '{1}'".format(worker_name, val["id"])) print("worker_{0}_hashrate.min 0".format(worker_name)) print("worker_{0}_hashrate.draw LINE1".format(worker_name)) - + for val in worker_data: print("") worker_name = "_".join(val["id"].split()) @@ -166,7 +166,7 @@ def write_config_worker(): print("graph_title Hashrate in Mh/s of worker {0}".format(worker_name)) print("graph_args --base 1000 -l 0") print("graph_vlabel Mh/s") - print("graph_category other") + print("graph_category htc") print("graph_scale no") print("whashrate.label hashrate") print("whashrate.type GAUGE") @@ -179,7 +179,7 @@ def write_config_worker(): print("graph_title Number of accepted shares ({0})".format(account_address)) print("graph_args --base 1000 -l 0") print("graph_vlabel Shares per ${graph_period}") - print("graph_category other") + print("graph_category htc") print("graph_scale no") print("graph_period minute") @@ -198,7 +198,7 @@ def write_config_worker(): print("graph_title Number of accepted shares {0}".format(worker_name)) print("graph_args --base 1000 -l 0") print("graph_vlabel Shares per ${graph_period}") - print("graph_category other") + print("graph_category htc") print("graph_scale no") print("graph_period minute") print("wshares.label shares") @@ -234,7 +234,7 @@ def write_data_worker(data): print("") print("multigraph worker_shares_{0}.worker_{1}".format(account_address, worker_name)) print("wshares.value {0} ".format(val["rating"])) - + def write_config(): @@ -247,7 +247,7 @@ def write_config(): params = GRAPH_TYPES[graph_type] for item in params: print("graph_title {0}".format(item["title"])) - print("graph_category other") + print("graph_category htc") if "info" in item: print("graph_info {0}".format(item["info"])) if "scale" in item: diff --git a/plugins/currency/zcash/zcash_flypool_hashrate_ b/plugins/currency/zcash/zcash_flypool_hashrate_ new file mode 100755 index 00000000..e13a95a2 --- /dev/null +++ b/plugins/currency/zcash/zcash_flypool_hashrate_ @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 + +""" +=head1 NAME + +zcash_flypool_hashrate_ - Munin plugin to monitor your zcash.flypool.org hashrate (H/s) + +=head1 APPLICABLE SYSTEMS + +All systems with "python3" and "munin" + +=head1 CONFIGURATION + +zcash_flypool_hashrate__ + +=head1 SYNOPSIS + + ln -s /usr/share/munin/plugins/zcash_flypool_hashrate_ \ + /etc/munin/plugins/zcash_flypool_hashrate_t1gMVWjGhdjvb71UU11JDrFmiZhgUf4x5TH_mine + +=head1 INTERPRETATION + +This plugin shows the zcash.flypool.org mining pool hashrate (H/s) of a given Zcasg address and rig name. +Hashrate is queried via Flypool API L. 
+ +=head1 VERSION + +0.0.1 + +=head1 AUTHOR + +L + +=head1 LICENSE + +GPLv2 + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut +""" + +import sys +import json +import codecs +from urllib.request import urlopen +from urllib.request import Request + +command = '' +if len(sys.argv) > 1: + command = sys.argv[1] + +try: + zcash_address, miner = sys.argv[0].split("_")[3:] +except ValueError: + print("The filename of this plugin (or its symlink) should follow this pattern: " + "'zcash_flypool_hashrate__'", file=sys.stderr) + sys.exit(9) + +if command == 'config': + print("graph_title Flypool {}".format(miner)) + print("graph_info zcash.flypool.org Mining Pool Hashrate for {}_{}".format(zcash_address, miner)) + print("graph_vlabel Flypool Hashrate") + print("graph_category htc") + print("flypool_hs_{}_{}.warning 200:".format(zcash_address, miner)) + print("flypool_hs_{}_{}.critical 100:".format(zcash_address, miner)) + print("flypool_hs_{}_{}.label H/s:".format(zcash_address, miner)) + sys.exit(0) + + +flypool_api_url = 'https://api-zcash.flypool.org/miner/' + zcash_address + '/worker/' + miner + '/currentStats' + +mining_req = Request(flypool_api_url) +# User-Agent to bypass Cloudflare +mining_req.add_header('User-Agent', 'Flypool Munin Plugin/1.0') + +try: + mining_stats_raw = urlopen(mining_req, timeout=15) +except IOError as exc: + print("Failed to request Flypool API: {}".format(exc), file=sys.stderr) + sys.exit(9) + +hash_rate = "U" + +try: + mining_stats = json.loads(mining_stats_raw.read().decode("utf-8")) +except ValueError: + print("Failed to parse JSON response.", file=sys.stderr) +else: + try: + worker = mining_stats['data'] + except (KeyError, TypeError): + print("JSON result error!", file=sys.stderr) + else: + try: + hash_rate = worker['currentHashrate'] + except (KeyError, TypeError): + print("No current Hashrate!", file=sys.stderr) + +print("flypool_hs_{}_{}.value {}".format(zcash_address, miner, hash_rate)) diff --git a/plugins/cyrus/cyrus-imapd b/plugins/cyrus/cyrus-imapd index 4492b7a1..dbb28a50 100755 --- a/plugins/cyrus/cyrus-imapd +++ b/plugins/cyrus/cyrus-imapd @@ -25,7 +25,7 @@ cyrus-imapd - Munin plugin to monitor the load on a cyrus imapd server =head1 CONFIGURATION The user running this plugin needs read and write access to the -cyrus-imapd proc directory. You will need to add the following to the +cyrus-imapd proc directory. You will need to add the following to the munin-node/plugin configuration: [cyrus-imapd] @@ -87,8 +87,8 @@ if [ "x${PROCDIR}x" = "xx" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. - +# graphs should look. + if [ "$1" = "config" ]; then echo 'graph_title Cyrus IMAPd Load' echo 'graph_args --base 1000 -l 0' diff --git a/plugins/darwin/dar_cpuusage b/plugins/darwin/dar_cpuusage index 788cbfeb..056dba02 100755 --- a/plugins/darwin/dar_cpuusage +++ b/plugins/darwin/dar_cpuusage @@ -91,7 +91,7 @@ for ( $i = 1; ($i < $#top and $quit == 0); $i++ ) { print "sys.value " . $sys . "\n"; print "user.value " . $usr . "\n"; print "idle.value " . $idl . 
"\n"; - $quit = 1; + $quit = 1; } } diff --git a/plugins/darwin/dar_uptime b/plugins/darwin/dar_uptime index 5271d453..2a49173d 100755 --- a/plugins/darwin/dar_uptime +++ b/plugins/darwin/dar_uptime @@ -1,4 +1,4 @@ -#!/usr/bin/perl +#!/usr/bin/perl # -*- perl -*- =head1 NAME @@ -69,10 +69,10 @@ if ( defined($ARGV[0])) { } } -@uptime = `/usr/bin/uptime`; +@uptime = `/usr/bin/uptime`; foreach(@uptime) { $_ =~ m/^.+up (.+?), \d+ us.+$/; - $timestr = $1; + $timestr = $1; if ( $timestr =~ m/^(\d+) day.+?$/ ) { $days = $1; } else { $days = 0; } diff --git a/plugins/darwin/dar_vpnd b/plugins/darwin/dar_vpnd index f454429b..ab1e9f73 100755 --- a/plugins/darwin/dar_vpnd +++ b/plugins/darwin/dar_vpnd @@ -32,12 +32,12 @@ my $cmd = "ps -ef | awk '/[p]ppd/ {print substr(\$NF,2);}' | wc -l"; if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { print "graph_category network\n"; - print "graph_args --base 1024 -r --lower-limit 0\n"; + print "graph_args --base 1024 -r --lower-limit 0\n"; print "graph_title Number of VPN Connections\n"; print "graph_vlabel VPN Connections\n"; - print "graph_info The Graph shows the Number of VPN Connections\n"; + print "graph_info The Graph shows the Number of VPN Connections\n"; print "connections.label Number of VPN Connections\n"; - print "connections.type GAUGE\n"; + print "connections.type GAUGE\n"; } else { my $output = `$cmd`; print "connections.value $output"; diff --git a/plugins/debian/deborphan b/plugins/debian/deborphan index 10d5ac44..633ac7df 100755 --- a/plugins/debian/deborphan +++ b/plugins/debian/deborphan @@ -76,7 +76,7 @@ EOF else for CAT in ${CATEGORIES}; do CATFIELD=$(clean_fieldname "${CAT}") - CATDATA=$(echo "${OUT}" | sed -n "s#${CAT} \+##p") + CATDATA=$(echo "${OUT}" | sed -n 's#'"${CAT}"' \+##p') echo "${CATFIELD}.value $(echo "${CATDATA}" | wc -l)" echo "${CATFIELD}.extinfo $(echo "${CATDATA}" | tr '\n' ' ')" done diff --git a/plugins/debian/debsecan_ b/plugins/debian/debsecan_ index a3a381b4..ab743004 100755 --- a/plugins/debian/debsecan_ +++ b/plugins/debian/debsecan_ @@ -24,9 +24,9 @@ The default configuration is as follows. [debsecan] env.suite jessie - env.fixed_warn 1 + env.fixed_warning 1 env.fixed_critical 1000 - env.remote_warn 1 + env.remote_warning 1 env.remote_critical 10 The name of the group needs to match the name of the symlink to be applied. @@ -84,7 +84,8 @@ esac if [ "$1" = "config" ] ; then cat <. + + +import btrfs +import sys + + +def munin_config(fs): + fsid = str(fs.fsid).replace('-', '_') + print("multigraph btrfs_device_stats_" + fsid) + print("graph_args --base 1000 -l 0") + print("graph_vlabel total btrfs attribute value") + print("graph_title btrfs total device stats for " + fs.path) + print("graph_category disk") + print("graph_info This graph shows the total stats of devices used by btrfs") + + print("corruption_errs_total.label Corruption Errors") + print("flush_errs_total.label Flush Errors") + print("generation_errs_total.label Generation Errors") + print("read_errs_total.label Read Errors") + print("write_errs_total.label Write Errors") + print("nr_items_total.label Nr. of Items") + print("flags_total.label Nr. of Flags") + + print("") + + devices = fs.devices() + for this_device in devices: + this_dev_info = fs.dev_info(this_device.devid) + this_dev_name = this_dev_info.path.replace('/dev/', '') + print("multigraph btrfs_device_stats_" + fsid + "." 
+ str(this_device.devid))
+ print("graph_args --base 1000 -l 0")
+ print("graph_vlabel btrfs attribute value")
+ print("graph_title btrfs device stats for " + this_dev_name)
+ print("graph_category disk")
+ print("graph_info This graph shows stats of devices used by btrfs")
+
+ print("corruption_errs.label Corruption Errors")
+ print("corruption_errs.warning 1")
+ print("flush_errs.label Flush Errors")
+ print("flush_errs.warning 1")
+ print("generation_errs.label Generation Errors")
+ print("generation_errs.warning 1")
+ print("read_errs.label Read Errors")
+ print("read_errs.warning 1")
+ print("write_errs.label Write Errors")
+ print("write_errs.warning 1")
+ print("nr_items.label Nr. of Items")
+ print("flags.label Nr. of Flags")
+ print("flags.warning 1")
+
+ print("")
+
+
+def munin_values(fs):
+ corruption_errs_total = 0
+ flush_errs_total = 0
+ generation_errs_total = 0
+ read_errs_total = 0
+ write_errs_total = 0
+ nr_items_total = 0
+ flags_total = 0
+
+ fsid = str(fs.fsid).replace('-', '_')
+ devices = fs.devices()
+
+ for this_device in devices:
+ this_dev_stat = fs.dev_stats(this_device.devid, False)
+
+ corruption_errs = this_dev_stat.corruption_errs
+ flush_errs = this_dev_stat.flush_errs
+ generation_errs = this_dev_stat.generation_errs
+ read_errs = this_dev_stat.read_errs
+ write_errs = this_dev_stat.write_errs
+ nr_items = this_dev_stat.nr_items
+ flags = this_dev_stat.flags
+
+ corruption_errs_total = corruption_errs_total + corruption_errs
+ flush_errs_total = flush_errs_total + flush_errs
+ generation_errs_total = generation_errs_total + generation_errs
+ read_errs_total = read_errs_total + read_errs
+ write_errs_total = write_errs_total + write_errs
+ nr_items_total = nr_items_total + nr_items
+ flags_total = flags_total + flags
+
+ print("multigraph btrfs_device_stats_" + fsid + "." + str(this_device.devid))
+
+ print("corruption_errs.value " + str(corruption_errs))
+ print("flush_errs.value " + str(flush_errs))
+ print("generation_errs.value " + str(generation_errs))
+ print("read_errs.value " + str(read_errs))
+ print("write_errs.value " + str(write_errs))
+ print("nr_items.value " + str(nr_items))
+ print("flags.value " + str(flags))
+
+ print("")
+
+ print("multigraph btrfs_device_stats_" + fsid)
+
+ print("corruption_errs_total.value " + str(corruption_errs_total))
+ print("flush_errs_total.value " + str(flush_errs_total))
+ print("generation_errs_total.value " + str(generation_errs_total))
+ print("read_errs_total.value " + str(read_errs_total))
+ print("write_errs_total.value " + str(write_errs_total))
+ print("nr_items_total.value " + str(nr_items_total))
+ print("flags_total.value " + str(flags_total))
+
+ print("")
+
+
+def main():
+ for path in btrfs.utils.mounted_filesystem_paths():
+ with btrfs.FileSystem(path) as fs:
+ if len(sys.argv) > 1 and sys.argv[1] == "config":
+ munin_config(fs)
+ else:
+ munin_values(fs)
+
+
+if __name__ == "__main__":
+ main()
+
+exit(0)
diff --git a/plugins/disk/btrfs_device_usage b/plugins/disk/btrfs_device_usage
new file mode 100755
index 00000000..c5c7238f
--- /dev/null
+++ b/plugins/disk/btrfs_device_usage
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+
+
+"""
+=pod
+
+=head1 NAME
+
+btrfs_device_usage - Script to monitor usage of btrfs devices
+
+=head1 CONFIGURATION
+
+Simply create a symlink in your plugins directory like with any other plugin.
+Must be run as root.
+ +[btrfs_device_usage] +user root + +=head2 DEFAULT CONFIGURATION + +=head1 BUGS + +=head1 AUTHOR + +2019, HaseHarald + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 LICENSE + +LGPLv3 + +=cut +""" + + +# This file contains a munin-plugin to gather btrfs statistics per device. +# +# This is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this plugin. If not, see . + + +import btrfs +import sys + + +def munin_config(fs): + fsid = str(fs.fsid).replace('-', '_') + + print("multigraph btrfs_device_usage_byte_" + fsid) + print("graph_title btrfs usage by device in bytes on " + fs.path) + print("graph_args --base 1024 -l 0") + print("graph_scale yes") + print("graph_vlabel bytes") + print("graph_category disk") + print("graph_info This graph shows bytes used by btrfs on every device") + + devices = fs.devices() + for this_device in devices: + this_dev_info = fs.dev_info(this_device.devid) + this_dev_name = this_dev_info.path.replace('/dev/', '') + print("btrfs_bytes_" + fsid + "_" + str(this_device.devid) + + ".label " + this_dev_name) + + print("") + + print("multigraph btrfs_device_usage_percent_" + fsid) + print("graph_title btrfs usage by device in percent on " + fs.path) + print("graph_args --base 1000 -l 0") + print("graph_scale no") + print("graph_vlabel %") + print("graph_category disk") + print("graph_info This graph shows percentage used by btrfs on every \ + device. 
Measured in percentage of device capacity.")
+
+ devices = fs.devices()
+ for this_device in devices:
+ this_dev_info = fs.dev_info(this_device.devid)
+ this_dev_name = this_dev_info.path.replace('/dev/', '')
+ print("btrfs_percent_" + fsid + "_" + str(this_device.devid) +
+ ".label " + this_dev_name)
+ print("btrfs_percent_" + fsid + "_" + str(this_device.devid) + ".warning 95")
+ print("btrfs_percent_" + fsid + "_" + str(this_device.devid) + ".critical 98")
+
+ print("")
+
+
+def munin_values(fs):
+ fsid = str(fs.fsid).replace('-', '_')
+ devices = fs.devices()
+
+ print("multigraph btrfs_device_usage_byte_" + fsid)
+
+ for this_device in devices:
+ this_dev_info = fs.dev_info(this_device.devid)
+ print("btrfs_bytes_" + fsid + "_" + str(this_device.devid) +
+ ".value " + str(this_dev_info.bytes_used))
+
+ print("")
+
+ # Reset device iterator
+ devices = fs.devices()
+
+ print("multigraph btrfs_device_usage_percent_" + fsid)
+
+ for this_device in devices:
+ this_dev_info = fs.dev_info(this_device.devid)
+ usage = 100.0 * this_dev_info.bytes_used / this_dev_info.total_bytes
+ print("btrfs_percent_" + fsid + "_" + str(this_device.devid) +
+ ".value " + str(round(usage, 2)))
+
+ print("")
+
+
+def main():
+ for path in btrfs.utils.mounted_filesystem_paths():
+ with btrfs.FileSystem(path) as fs:
+ if len(sys.argv) > 1 and sys.argv[1] == "config":
+ munin_config(fs)
+ else:
+ munin_values(fs)
+
+
+if __name__ == "__main__":
+ main()
+
+exit(0)
diff --git a/plugins/disk/btrfs_subvol_usage b/plugins/disk/btrfs_subvol_usage
old mode 100644
new mode 100755
index 887eacc6..d58e0977
--- a/plugins/disk/btrfs_subvol_usage
+++ b/plugins/disk/btrfs_subvol_usage
@@ -19,8 +19,8 @@ env.fsroot /path/to/btrfs/filesystem
=head1 USAGE
-Link/Copy this plugin to /etc/munin/plugins/ and restart the munin-node.
-
+Link/Copy this plugin to /etc/munin/plugins/ and restart the munin-node.
+
=head1 AUTHOR
Alexander Knöbel
@@ -62,7 +62,7 @@ while (my $line = <SVS>) {
close SVS;
# get sizes from quota
-open(QGS, "btrfs qgroup show @fsroot |")
+open(QGS, "btrfs qgroup show --raw @fsroot |")
or die("Failed to run 'btrfs qgroup show': " . $!);
while (my $line = <QGS>) {
chomp $line;
diff --git a/plugins/disk/df_with_nfs b/plugins/disk/df_with_nfs
index 08f25b3f..d1a19697 100755
--- a/plugins/disk/df_with_nfs
+++ b/plugins/disk/df_with_nfs
@@ -60,7 +60,7 @@ if [ "$1" = "autoconf" ]; then
fi
clean_name() {
- echo $1 $7 $2 | sed 's/[\/.-]/_/g'| awk "{
+ echo $1 $7 $2 | sed 's/[\/.-]/_/g'| awk "{
if (\$3 == \"tmpfs\")
n=\$1\$2
else
@@ -80,12 +80,12 @@ if [ "$1" = "config" ]; then
df -T -P -l -x none -x unknown -x udf -x iso9660 -x romfs -x ramfs -x tmpfs | sed 1d | grep -v "//" | sort | while read i; do
name=`clean_name $i`
echo -n "$name.label "
- echo $i | awk "{
+ echo $i | awk "{
dir=\$7
if (length(dir) <= $MAXLABEL)
print dir
else
- printf (\"...%s\n\", substr (dir, length(dir)-$MAXLABEL+4, $MAXLABEL-3))
+ printf (\"...%s\n\", substr (dir, length(dir)-$MAXLABEL+4, $MAXLABEL-3))
print \"$name.info \" \$7 \" (\" \$2 \") -> \" \$1;
}"
echo "$name.warning 92"
diff --git a/plugins/disk/dirsizes b/plugins/disk/dirsizes
index 1d417847..0767243d 100755
--- a/plugins/disk/dirsizes
+++ b/plugins/disk/dirsizes
@@ -21,6 +21,8 @@
# You can test this plugin by calling it with params "test" and your watchdirs:
# ./dirsizes test /dir1,/tmp/dir2
#
+# The directories can contain wildcards that are automatically expanded.
+# # ############################################################################## # @@ -45,6 +47,17 @@ else { @watchdirs = split( ",", $ENV{"watchdirs"} ); } +# Glob all of the watchdirs. +my @globbed_watchdirs; +foreach my $watchdir ( @watchdirs ) +{ + foreach my $expanded_dir ( glob( $watchdir ) ) + { + push @globbed_watchdirs, $expanded_dir; + } +} +@watchdirs = @globbed_watchdirs; + # Config or read request? if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { @@ -88,9 +101,8 @@ sub getSize { my ($dir) = @_; # Get the size via `du` - my @dirsize = split( ' ', `du -cb $dir | grep "total" | tail -1 ` ); + my @dirsize = split( ' ', `du -cb "$dir" | grep "total" | tail -1 ` ); return @dirsize[0]; } exit 0; - diff --git a/plugins/disk/dm_cache_occupancy_ b/plugins/disk/dm_cache_occupancy_ index cddee218..3d45b5fd 100755 --- a/plugins/disk/dm_cache_occupancy_ +++ b/plugins/disk/dm_cache_occupancy_ @@ -50,7 +50,10 @@ GPLv2 CVOL=${0##*dm_cache_occupancy_} #workaround for http://munin-monitoring.org/ticket/1236 -CVOL=${CVOL/____/-} +while [[ $CVOL == *"____"* ]] +do + CVOL=${CVOL/____/-} +done case $1 in autoconf) diff --git a/plugins/disk/dm_cache_statistics_ b/plugins/disk/dm_cache_statistics_ index 94a959e2..97257e1c 100755 --- a/plugins/disk/dm_cache_statistics_ +++ b/plugins/disk/dm_cache_statistics_ @@ -50,7 +50,10 @@ GPLv2 CVOL=${0##*dm_cache_statistics_} #workaround for http://munin-monitoring.org/ticket/1236 -CVOL=${CVOL/____/-} +while [[ $CVOL == *"____"* ]] +do + CVOL=${CVOL/____/-} +done case $1 in autoconf) diff --git a/plugins/disk/du b/plugins/disk/du index f0133d49..547a2d68 100755 --- a/plugins/disk/du +++ b/plugins/disk/du @@ -16,10 +16,10 @@ # Requirements # - bash (or change first line to sh instead of bash or any other shell) # - existing and readable directory to scan -# - du command, it exists on most of the *nix operating systems +# - du command, it exists on most of the *nix operating systems # ################################################################# -# +# # Configuration # # directory to check @@ -44,10 +44,10 @@ ID=1; # Changelog # # Revision 0.1 Tue 03 Feb 2009 02:16:02 PM CET _KaszpiR_ -# - initial release, -# +# - initial release, +# ################################################################# -# Magick markers (optional - used by munin-config and som installation +# Magick markers (optional - used by munin-config and some installation # scripts): #%# family=auto #%# capabilities=autoconf @@ -58,11 +58,10 @@ ID=1; if [ "$1" = "autoconf" ]; then if [ -d $DIR ]; then echo "yes" - exit 0 else echo "no (check your path)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/disk/du-2 b/plugins/disk/du-2 index 41e4adc1..e2689a5b 100755 --- a/plugins/disk/du-2 +++ b/plugins/disk/du-2 @@ -92,11 +92,12 @@ if( (defined $ARGV[0]) && ($ARGV[0] eq "config") ) { if ($bar =~ m/(\d+)\s+(.+)/) { my $dir = $2; clean_path(\$dir); - print "$dir.label $dir\n"; + my $clean_dir = clean_fieldname($dir); + print "$clean_dir.label $dir\n"; if ($foo++) { - print "$dir.draw STACK\n"; + print "$clean_dir.draw STACK\n"; } else { - print "$dir.draw AREA\n"; + print "$clean_dir.draw AREA\n"; } } } @@ -106,20 +107,21 @@ if( (defined $ARGV[0]) && ($ARGV[0] eq "config") ) { } ##### fetch -open (FILE, "<", $CACHEFILE) or munin_exit_fail(); -while(defined (my $foo = )) { - if ($foo =~ m/(\d+)\s+(.+)/) { - my ($field, $value) = ($2, $1); - clean_path(\$field); - print $field, ".value ", $value, "\n"; +if (open (FILE, "<", $CACHEFILE)) { + while(defined (my $foo = )) { 
+ if ($foo =~ m/(\d+)\s+(.+)/) { + my ($field, $value) = ($2, $1); + clean_path(\$field); + print clean_fieldname($field), ".value ", $value, "\n"; + } } + close(FILE); } -close(FILE); daemonize(); # ## -### PUBLIC FONCTIONS +### PUBLIC FUNCTIONS ############################################################################### ## Used to create the fork sub daemonize { @@ -133,7 +135,7 @@ sub daemonize { ## In the child, let's get the du infos if necessary if (cache_is_too_old() && du_not_running()) { my $dirs = $ENV{dirs}; - system("touch $LOCKFILE; du -sb $dirs > $TEMPFILE; cat $TEMPFILE > $CACHEFILE; rm $LOCKFILE; date +%s > $TIMEFILE;"); + system("touch $LOCKFILE; du -sb $dirs | sort -n -r > $TEMPFILE; cat $TEMPFILE > $CACHEFILE; rm $LOCKFILE; date +%s > $TIMEFILE;"); } exit; } ## daemonize @@ -183,7 +185,7 @@ sub munin_exit_fail { # ## -### INTERNALS FONCTIONS +### INTERNALS FUNCTIONS ############################################################################### sub __munin_exit { my $exitcode = shift; diff --git a/plugins/disk/e2 b/plugins/disk/e2 old mode 100644 new mode 100755 index 4eaed72e..c1377aa0 --- a/plugins/disk/e2 +++ b/plugins/disk/e2 @@ -33,7 +33,7 @@ print_details() { val=`echo ${line#*:}` val1=${val%% *} [ -z "$line" ] && break - + case "$key" in "Mount count") myname=$1; mc=$val ;; "Maximum mount count") mmc=$val ;; @@ -46,22 +46,20 @@ print_details() { } lse2() { - for name in {h,s}d{a..z}{1..16}; do - if [ -b /dev/$name ]; then - LANG=C dumpe2fs /dev/$name 2>/dev/null | print_details "$name" - fi - done + while read -r line ; do + value=( $line ); + LANG=C dumpe2fs -h "${value[1]}" 2>/dev/null | print_details "${value[0]}" + done <<< "$(lsblk -o NAME,PATH,FSTYPE -r | grep ext[2-4] | uniq )" } if [ "$1" = "autoconf" ]; then if which dumpe2fs >/dev/null 2>&1; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then lse2 | while read name rest; do @@ -79,6 +77,7 @@ plugin=${self%%_*} if [ -n "${self//[!_]/}" ]; then blkdev=${self#*_} blkdevAppend=" ($blkdev)" + blkdevpath=$(lsblk -o PATH,NAME -r | grep "$blkdev" | uniq | cut -d' ' -f1) else blkdev= blkdevAppend= @@ -90,7 +89,7 @@ if [ "$1" = "config" ]; then echo "graph_category disk" args="-l 0" case "$plugin" in - mounts) + mounts) echo "graph_vlabel times" echo "graph_title Ext2 Filesystem mount details" ;; @@ -98,41 +97,42 @@ if [ "$1" = "config" ]; then echo "graph_vlabel days" echo "graph_title Ext2 Filesystem fsck interval" ;; - writes) + writes) args="$args --base 1024" echo "graph_vlabel bytes" echo "graph_title Ext2 Filesystem lifetime writes$blkdevAppend" ;; esac echo "graph_args $args" - + if [ -z "$blkdev" ]; then lse2 else - LANG=C dumpe2fs /dev/$blkdev 2>/dev/null | print_details "$blkdev" + LANG=C dumpe2fs -h "$blkdevpath" 2>/dev/null | print_details "$blkdev" fi | \ while read name mounts maxmounts last interval writes; do [ -z "$blkdev" ] && blkdevAppend2=" ($name)" || blkdevAppend2= + name_esc=${name//-/_} case "$plugin" in mounts) - echo "${name}_mounts.label Mount count$blkdevAppend2" - echo "${name}_mounts.draw LINE2" - echo "${name}_maxmounts.label Max. mount count$blkdevAppend2" - echo "${name}_maxmounts.draw LINE1" + echo "${name_esc}_mounts.label Mount count$blkdevAppend2" + echo "${name_esc}_mounts.draw LINE2" + echo "${name_esc}_maxmounts.label Max. 
mount count$blkdevAppend2" + echo "${name_esc}_maxmounts.draw LINE1" ;; checked) - echo "${name}_last.label Since checked$blkdevAppend2" - echo "${name}_last.draw LINE2" - echo "${name}_last.cdef ${name}_last,86400,/" - echo "${name}_interval.label Check interval$blkdevAppend2" - echo "${name}_interval.draw LINE1" - echo "${name}_interval.cdef ${name}_interval,86400,/" + echo "${name_esc}_last.label Since checked$blkdevAppend2" + echo "${name_esc}_last.draw LINE2" + echo "${name_esc}_last.cdef ${name_esc}_last,86400,/" + echo "${name_esc}_interval.label Check interval$blkdevAppend2" + echo "${name_esc}_interval.draw LINE1" + echo "${name_esc}_interval.cdef ${name_esc}_interval,86400,/" ;; writes) - echo "${name}_writes.label Lifetime writes$blkdevAppend2" - echo "${name}_writes.type GAUGE" - echo "${name}_writes.draw LINE2" - echo "${name}_writes.cdef ${name}_writes,1073741824,*" + echo "${name_esc}_writes.label Lifetime writes$blkdevAppend2" + echo "${name_esc}_writes.type GAUGE" + echo "${name_esc}_writes.draw LINE2" + echo "${name_esc}_writes.cdef ${name_esc}_writes,1073741824,*" ;; esac done @@ -147,22 +147,23 @@ fi if [ -z "$blkdev" ]; then lse2 else - LANG=C dumpe2fs /dev/$blkdev 2>/dev/null | print_details "$blkdev" + LANG=C dumpe2fs -h "$blkdevpath" 2>/dev/null | print_details "$blkdev" fi | \ while read name mounts maxmounts last interval writes; do + name_esc=${name//-/_} case "$plugin" in mounts) - echo "${name}_mounts.value $mounts" - echo "${name}_maxmounts.value $maxmounts" + echo "${name_esc}_mounts.value $mounts" + echo "${name_esc}_maxmounts.value $maxmounts" ;; checked) now=`date +%s` past=$(( now - last )) - echo "${name}_last.value $past" - echo "${name}_interval.value $interval" + echo "${name_esc}_last.value $past" + echo "${name_esc}_interval.value $interval" ;; writes) - echo "${name}_writes.value $writes" + echo "${name_esc}_writes.value $writes" ;; esac done diff --git a/plugins/disk/example-graphs/btrfs_device_usage-day.png b/plugins/disk/example-graphs/btrfs_device_usage-day.png new file mode 100644 index 00000000..58789447 Binary files /dev/null and b/plugins/disk/example-graphs/btrfs_device_usage-day.png differ diff --git a/plugins/disk/example-graphs/btrfs_subvol_usage-day.png b/plugins/disk/example-graphs/btrfs_subvol_usage-day.png new file mode 100644 index 00000000..75b5fd8b Binary files /dev/null and b/plugins/disk/example-graphs/btrfs_subvol_usage-day.png differ diff --git a/plugins/disk/file_age b/plugins/disk/file_age index 29636d14..52743de3 100755 --- a/plugins/disk/file_age +++ b/plugins/disk/file_age @@ -8,23 +8,23 @@ case $1 in COUNTER=1 while [ $COUNTER -gt 0 ]; do FILE_PATH="file${COUNTER}_path" - + # Is the path for this file specified? eval FILE=\$$FILE_PATH if [ "$FILE" == "" ]; then break; fi - + # It is! Add it to the graphs. GRAPH_ORDER="$GRAPH_ORDER file_$COUNTER" - + # Does this file have a specified label? LABEL_COUNTER="file${COUNTER}_label" eval LABEL=\$$LABEL_COUNTER if [ "$LABEL" == "" ]; then LABEL=`basename $FILE` fi - + # Associated warning level? WARNING="file${COUNTER}_warning" eval WARNING=\$$WARNING @@ -44,7 +44,7 @@ case $1 in echo "file_$COUNTER.min 0" let COUNTER=COUNTER+1 done; - + echo "graph_order $GRAPH_ORDER" echo "graph_title File age" echo 'graph_args --base 1000 -l 0' @@ -63,14 +63,14 @@ while [ $COUNTER -gt 0 ]; do if [ "$FILE" == "" ]; then break; fi - - # If the file isn't readable, say it's zero. + + # If the file isn't readable, say it's zero. if [ ! 
-r "$FILE" ]; then VALUE=0 else VALUE=$(($(date +%s) - $(stat -c '%Y' "$FILE"))) fi - + echo "file_$COUNTER.value $VALUE" let COUNTER=COUNTER+1 done; diff --git a/plugins/disk/hdsentinel b/plugins/disk/hdsentinel old mode 100644 new mode 100755 index dfb1dc36..f4bd3735 --- a/plugins/disk/hdsentinel +++ b/plugins/disk/hdsentinel @@ -1,7 +1,7 @@ #!/bin/bash # -*- sh -*- # -# Multigraph plugin to monitor harddrive temperature, condition, +# Multigraph plugin to monitor harddrive temperature, condition, # performance and estimated remaining lifetime through HDSentinel. # # To use, download the latest HDSentinel for Linux x86 or x64 from diff --git a/plugins/disk/hp_temp b/plugins/disk/hp_temp index 3c524f81..baa850a4 100755 --- a/plugins/disk/hp_temp +++ b/plugins/disk/hp_temp @@ -26,7 +26,7 @@ if ($mode eq 'autoconf') if ($mode eq 'config') { - # headers for the temperatur + # headers for the temperature print "graph_title HP Temperature sensors in Celsius\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel degrees Celsius\n"; @@ -77,7 +77,7 @@ while () $name =~ s/\ /_/g; # add the ID to be 100% unique $name .= '_'.$id; - + if ($mode eq 'config') { # only needed here in config @@ -95,7 +95,7 @@ while () } # calc warning from threshold, 5% less my $warning = sprintf("%.0f", $threshold * 95 / 100); - + print $name.".label ".$location."\n"; print $name.".warning ".$warning."\n"; print $name.".critical ".$threshold."\n"; diff --git a/plugins/disk/hpasmcli2_ b/plugins/disk/hpasmcli2_ index 069caffe..f27c48e9 100755 --- a/plugins/disk/hpasmcli2_ +++ b/plugins/disk/hpasmcli2_ @@ -3,7 +3,7 @@ # Plugin to monitor Proliant server health status using hpasmcli. # # Config variables: -# user root -- requrired by hpasmcli +# user root -- required by hpasmcli # env.hpasmcli -- path to hpasmcli executable (optional) # env.degree -- Unit of temperatures (C or F / default value is C) # @@ -60,7 +60,6 @@ if (defined($ARGV[0])) { my @chk_result = `$hpasmcli -s \"help\"`; if ($? 
eq "0") { print "yes\n"; - exit 0; } else { my $reason = 'Unknown error'; foreach my $line (@chk_result) { @@ -71,12 +70,11 @@ if (defined($ARGV[0])) { } } print "no ($reason)\n"; - exit 1; } } else { print "no (hpasmcli not found)\n"; - exit 1; } + exit 0; } elsif ($ARGV[0] eq 'suggest') { print "temp\nfans\n"; exit 0; diff --git a/plugins/disk/linux_diskstat_ b/plugins/disk/linux_diskstat_ index de522d38..9a6131b3 100755 --- a/plugins/disk/linux_diskstat_ +++ b/plugins/disk/linux_diskstat_ @@ -147,12 +147,11 @@ if ( defined $ARGV[0] && $ARGV[0] eq 'autoconf' ) { if ( !$@ && keys %stats ) { print "yes\n"; - exit 0; } else { print "no\n"; - exit 1; } + exit 0; } @@ -498,7 +497,7 @@ sub read_sysfs { $cur_device =~ tr#!#/#; # Faking missing diskstats values - unshift @elems, ( '', '', $cur_device ); + unshift @elems, ( -1, -1, $cur_device ); push @lines, \@elems; diff --git a/plugins/disk/lvm_snap_used b/plugins/disk/lvm_snap_used index 91a34012..4be838a7 100755 --- a/plugins/disk/lvm_snap_used +++ b/plugins/disk/lvm_snap_used @@ -3,7 +3,7 @@ # Plugin to monitor the % of allocated area of a LVM snapshot # # Parameters: -# +# # config # autoconf # @@ -12,7 +12,7 @@ # #%# family=auto #%# capabilities=autoconf -# +# # 2011/05/20 - pmoranga - initial version # # 2012/01/27 - Sébastien Gross @@ -26,16 +26,16 @@ lvdisplay=$(which lvdisplay) if [ "$1" = "autoconf" ]; then if test -n "${lvdisplay}"; then echo yes - exit 0 + else + echo "no (lvdisplay not found)" fi - echo "no lvdisplay found" - exit 1 + exit 0 fi if [ "$1" = "config" ]; then - echo 'graph_title Allocated space for snapshot' + echo 'graph_title Allocated space for snapshot' echo 'graph_vlabel %' echo 'graph_category disk' echo 'graph_args -l 0 -u 100 -r' @@ -48,7 +48,7 @@ ${lvdisplay} -C | awk '$3 ~ /^s/{print}' | while read line; do origin="$(echo $line | awk '{print $5}')" origin="$(clean_fieldname "$origin")" percent="$(echo $line | awk '{print $6}')" - + if [ "$1" = "config" ]; then echo "$id.label $name snapshot of $origin" else diff --git a/plugins/disk/md_iostat_ b/plugins/disk/md_iostat_ index 33d5a472..cc7f8928 100755 --- a/plugins/disk/md_iostat_ +++ b/plugins/disk/md_iostat_ @@ -96,10 +96,10 @@ $mdstat_present = -f '/proc/mdstat'; if ( defined($ARGV[0]) and $ARGV[0] eq "autoconf") { if ($mdstat_present and ($detailed_present or $stat_present)) { print "yes\n"; - exit 0; + } else { + print "no\n"; } - print "no\n"; - exit 1; + exit 0; } my %devs; diff --git a/plugins/disk/megaraid-controller-information b/plugins/disk/megaraid-controller-information index fdb19eec..67e442e0 100755 --- a/plugins/disk/megaraid-controller-information +++ b/plugins/disk/megaraid-controller-information @@ -1,77 +1,77 @@ #!/usr/bin/perl -w # -# Munin plugin for MegaRAID +# Munin plugin for MegaRAID # This plugin can graph:- Currently Drive Temperature and Error Count -# +# #--------------------- # Examples # Create a symbolic link to MegaRaid__ # ln -s /usr/share/munin/plugins/MegaRaid_ /etc/munin/plugins/MegaRaid_0_temp # graph temperature on adapter 0 -# +# # ln -s /usr/share/munin/plugins/MegaRaid_ /etc/munin/plugins/MegaRaid_0_error # graph media errors on adapter 0 -# +# # ln -s /usr/share/munin/plugins/MegaRaid_ /etc/munin/plugins/MegaRaid_1_temp # graph temperature on adapter 1 -# +# #--------------------- # Log # Revision 0.1 2011/04/16 idobson # -First version only basic support of the MegaRaid controller -# +# # Revision 0.2 2011/04/17 fkatzenb # -Added bash statement to remove the log file created each time MegaCli64 is ran # -Added a 
few comments and visual changes -# +# # Revision 1.0 2011/04/17 fkatzenb # -Revamped Code to symbolic link for sensor type and future growth -# +# # Revision 1.1 2011/04/17 fkatzenb # -Revised scalling -# +# # Revision 1.2 2011/04/28 fkatzenb # -Added support for graph_info support # -Added warning & critical alerts support # -Added data info -# +# # Revision 2.0 2011/04/29 fkatzenb # -Added remaining support for SMART Errors -# +# # Revision 2.1 2011/04/29 fkatzenb # -Added version information for in the graph description -# -# +# +# #--------------------- -# +# # Add the following to your /etc/munin/plugin-conf.d/munin-node: -# +# # [MegaRaid_*] # user root -# +# #--------------------- -# -# +# +# # Magic markers (optional - used by munin-config and installation scripts): -# +# #%# family=auto #%# capabilities=autoconf -# +# my $DisplayVer=2.1; use strict; use warnings; my $DevID=0; #Device Number found -my $DevData=0; #Device Data found +my $DevData=0; #Device Data found # Parse out Adapter number and parameter desired from file name and remove whitespace -my $Parameters=`basename $0 | sed 's/^MegaRaid_//g' | tr '_' '-'` ; +my $Parameters=`basename $0 | sed 's/^MegaRaid_//g' | tr '_' '-'` ; chomp $Parameters; my ($Adapter,$Type)=split(/-/,$Parameters); # Locate MegaCli64 application and remove whitespace -my $Command=`which MegaCli64`; +my $Command=`which MegaCli64 megacli | head -n 1`; chomp $Command; # Use this to define future parameters to monitor @@ -126,15 +126,14 @@ my %config = ( if ($ARGV[0] and $ARGV[0] eq "autoconf" ) { if (-e $Command ) { print "yes\n"; - exit 0; } else { - print "no\n"; - exit 1 + print "no\n"; } + exit 0; } #Read Output of MegaRaid command -$Command.=" -PDList -a".$Adapter; +$Command.=" -PDList -a".$Adapter; my @Output=qx($Command); #Munin Config Options @@ -145,14 +144,14 @@ if ($ARGV[0] and $ARGV[0] eq "config"){ print "graph_scale yes\n"; print "graph_category disk\n"; print "graph_info $config{$Type}->{description}
Generated by MegaRaid_, Version $DisplayVer
\n"; - + foreach my $Line (@Output) { $Line=~ s/\r//g; $Line=~ s/\n//g; - + #Find the device ID - if ( $Line=~ m/Slot Number: /i ) { - $DevID=$Line; + if ( $Line=~ m/Slot Number: /i ) { + $DevID=$Line; $DevID=~ s/Slot Number: //; print "A".$Adapter."_D".$DevID."_$Type.label A$Adapter:D$DevID $config{$Type}->{label}\n"; print "A".$Adapter."_D".$DevID."_$Type.info Adapter: $Adapter / Drive: $DevID - $config{$Type}->{info_tag}\n"; @@ -161,9 +160,9 @@ if ($ARGV[0] and $ARGV[0] eq "config"){ } if ($config{$Type}->{critical} ne '') { print "A".$Adapter."_D".$DevID."_$Type.critical $config{$Type}->{critical}\n"; - } + } } - } + } exit 0; } @@ -176,7 +175,7 @@ foreach my $Line (@Output) { if ( $Line=~ m/Slot Number: /i ) { $DevID=$Line; $DevID=~ s/Slot Number: //; chomp $DevID; } #Find the data and print it out - if ( $Line=~ m/$config{$Type}->{lookfor}/i ) { + if ( $Line=~ m/$config{$Type}->{lookfor}/i ) { $DevData=$Line; $DevData=~s/$config{$Type}->{lookfor}//; $DevData=~s/C.*//; diff --git a/plugins/disk/megaraid-hdd-temperature-using-megacli b/plugins/disk/megaraid-hdd-temperature-using-megacli index bb45ee9c..261ba7c4 100755 --- a/plugins/disk/megaraid-hdd-temperature-using-megacli +++ b/plugins/disk/megaraid-hdd-temperature-using-megacli @@ -1,8 +1,8 @@ #!/bin/bash # Plugin to monitor harddrive temperatures connected to a MegaRAID controller -# -# Plugin must be ran as root so add these configuration in +# +# Plugin must be ran as root so add these configuration in # /etc/munin/plugin-conf.d/munin-node. # # [megacli*] diff --git a/plugins/disk/nvme b/plugins/disk/nvme new file mode 100755 index 00000000..2bb585bc --- /dev/null +++ b/plugins/disk/nvme @@ -0,0 +1,273 @@ +#! /usr/bin/perl +# -*- mode: perl; perl-indent-level: 4 -*- + +=head1 NAME + +nvme - Munin plugin to monitor the use of NVMe devices + +=head1 APPLICABLE SYSTEMS + +Linux systems with NVMe (Non-Volatile Memory storage attached via PCIe +bus). + +=head1 CONFIGURATION + +The plugin uses nvme(1) from the nvme-cli project to read status from +the NVMe devices. This requires root access. + + [nvme] + user root + +The plugin does not support alerting. + +=head1 INTERPRETATION + +This is a multigraph plugin which makes three graphs. + +=head2 nvme_usage + +This reports how much of capacity is allocated in each NVMe +"namespace". The report is in percent. This number may not have much +relation to actual use, e.g., if deleted data areas have not been +trimmed/discarded. + +=head2 nvme_bytes + +This reports read and write activity on each NVMe device, in bytes per +second. Ideally there should be much more read than write. If they +are symmetrical, you are using your NVMe as a very expensive FIFO, and +if you write more than you read, you should probably look for archival +storage instead. + +It is a good idea to compare these numbers to I/O counters from +diskstats. If they are much higher, look into whether the write +amplification can be due to suboptimal I/O request sizes. + +=head2 nvme_writecycles + +This graphs is intended to give an indication of how much life there +is left in your NVMe. It calculates the number of bytes written +during each device's lifetime against the capacity of the device, +thereby getting an average number of write cycle each cell has +experienced. + +A prosumer NVMe will handle a few thousand writes to each cell before +the error rate gets out of hand. + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 BUGS + +None known. 
+ +=head1 VERSION + + 1.0 + +=head1 AUTHOR + +Kjetil Torgrim Homme + +=head1 LICENSE + +GPLv2 + +=cut + +use strict; +use Munin::Plugin; +use IPC::Cmd qw(can_run); + +# Check that multigraph is supported +need_multigraph(); + +# Return undef if no problem, otherwise explanation +sub autoconf_problem { + return if can_run('nvme'); + if (open(my $mods, '/proc/modules')) { + while (<$mods>) { + return "missing nvme(1)" if /^nvme[^a-z]/; + } + close($mods); + } + return "missing nvme"; # vague message for non-Linux +} + +sub run_nvme { + my (@cmd) = @_; + my @lines; + if (can_run('nvme') && open(my $nvme, '-|', 'nvme', @cmd)) { + @lines = <$nvme>; + close($nvme); + warn "nvme: probably needs to run as user root\n" if $? && $> != 0; + } + @lines; +} + +sub human_to_bytes { + my ($str) = @_; + my %units = ( + kB => 1000, + MB => 1000_000, + GB => 1000_000_000, + TB => 1000_000_000_000, + PB => 1000_000_000_000_000, # I wish I had need for this + ); + $str =~ /(\d+(\.\d+)?)\s+(.B)/; + int($1 * $units{$3}); +} + +sub nvme_list { + # Node SN Model Namespace Usage Format FW Rev + # ---------------- -------------------- ---------------------------------------- --------- -------------------------- ---------------- -------- + # /dev/nvme1n1 S464NB0K601188N Samsung SSD 970 EVO 2TB 1 695.50 GB / 2.00 TB 512 B + 0 B 1B2QEXE7 + my %devices; + + my $recognised_output; + my $lineno = 0; + for (run_nvme('list')) { + ++$lineno; + if (m:^Node\s+SN\s+Model\s+Namespace Usage:) { + ++$recognised_output; + } elsif (m:^(/\S+)\s+(\S+)\s+(\S.*\S)\s{3,}(\d+)\s+(\S+\s+.B)\s+/\s+(\S+\s+.B):) { + $devices{$2} = { + device => $1, + sn => $2, + model => $3, + namespace => $4, + usage => human_to_bytes($5), + capacity => human_to_bytes($6), + }; + } elsif ($lineno > 2) { + # could not parse device information + $recognised_output = 0; + } + } + if ($lineno && !$recognised_output) { + warn "Could not recognise output from 'nvme list', please report\n"; + } + \%devices; +} + +sub smart_log { + my ($dev) = @_; + my %info; + for (run_nvme('smart-log', $dev)) { + next if /^Smart Log/; + if (/(.*?)\s+:\s+(.*)/) { + my ($var, $value) = ($1, $2); + $var =~ s/\s/_/g; + if ($value =~ /^\d+(,\d\d\d)+$/) { + $value =~ s/,//g; + } + $info{lc $var} = $value; + } + } + return \%info; +} + +use Data::Dumper; + +my $mode = ($ARGV[0] or "print"); + +my $problem = autoconf_problem(); +my $list = nvme_list(); + +if ($mode eq 'autoconf') { + if (keys %{$list}) { + print "yes\n"; + } else { + printf("no (%s)\n", $problem || "no devices to monitor"); + } + exit 0; +} + +my @sn = sort keys %{$list}; + +if ($mode eq 'config') { + my $sn_list = join(' ', @sn); + + print <<'EOF'; +multigraph nvme_usage +graph_title NVME Namespace Usage +graph_order $sn_list +graph_vlabel Percent used +graph_scale no +graph_category disk +graph_info How much space is used +EOF + for (@sn) { + print <<"EOF"; +$_.label $list->{$_}->{device} used +$_.type GAUGE +$_.max 100 +$_.min 0 +EOF + } + print <<'EOF'; +multigraph nvme_bytes +graph_title NVME Bytes Read / Written +graph_order $sn_list +graph_vlabel bytes read (-) / written (+) per ${graph_period}' +graph_category disk +graph_info How much data is read and written +graph_period second +EOF + for (@sn) { + print <<"EOF"; +${_}_r.label $list->{$_}->{device} +${_}_r.type DERIVE +${_}_r.min 0 +${_}_r.graph no +${_}_w.label $list->{$_}->{device} +${_}_w.type DERIVE +${_}_w.min 0 +${_}_w.negative ${_}_r +EOF + } + print <<'EOF'; +multigraph nvme_writecycles +graph_title NVME Write Cycles +graph_order $sn_list +graph_vlabel 
Cycles +graph_args --logarithmic +graph_category disk +graph_info How much data has been written in lifetime divided by capacity +EOF + for (@sn) { + print <<"EOF"; +$_.label $list->{$_}->{device} write cycles +$_.type GAUGE +$_.min 0 +EOF + } +} else { + for (@sn) { + $list->{$_}->{smart} = smart_log($list->{$_}->{device}); + } + print "multigraph nvme_usage\n"; + for (@sn) { + my $info = $list->{$_}; + my $used = 100 * $info->{usage} / $info->{capacity}; + print "$_.value $used\n"; + } + print "multigraph nvme_bytes\n"; + for (@sn) { + my $info = $list->{$_}; + my $rbytes = $info->{smart}->{data_units_read}; + my $wbytes = $info->{smart}->{data_units_written}; + print "${_}_r.value $rbytes\n"; + print "${_}_w.value $wbytes\n"; + } + print "multigraph nvme_writecycles\n"; + for (@sn) { + my $info = $list->{$_}; + + # The unit size reported is 1000 blocks. + my $cycles = $info->{smart}->{data_units_read} * 512_000 / $info->{capacity}; + print "$_.value $cycles\n"; + } +} diff --git a/plugins/disk/quota2percent_ b/plugins/disk/quota2percent_ old mode 100644 new mode 100755 index 6b3e6c62..b8784d50 --- a/plugins/disk/quota2percent_ +++ b/plugins/disk/quota2percent_ @@ -28,18 +28,18 @@ You could define two alert levels, the graph language, min. human UID and dealin env.humanuid [value] (default: 1000, only need if there is an other value define for UID_MIN in /etc/login.defs) env.low_uid [never|no|yes] (default: never) set to no for producing rrd files for system user, but don't show those graphs (e.g. for later analyses) - if set to yes system user graphs are drawn + if set to yes system user graphs are drawn =head1 DESCRIPTION -Wild card Plugin for monitoring the utilization of devices with quota rules. -A graph is drawn for each user, which shows the usage as a percentage of his hard limit. System accounts (UID <1000) are suppressed. -In addition, a graph is displayed which indicates the ratio device size to device coverage. -The script repqutoa, usually part of the package quota, is needed. +Wild card Plugin for monitoring the utilization of devices with quota rules. +A graph is drawn for each user, which shows the usage as a percentage of his hard limit. System accounts (UID <1000) are suppressed. +In addition, a graph is displayed which indicates the ratio device size to device coverage. +The script repqutoa, usually part of the package quota, is needed. The plugin itself can be stored in any directory. For example, the device sdb1 shell be monitored, a symbolic link must be created -in the /etc/munin/plugins/ directory as follows: +in the /etc/munin/plugins/ directory as follows: -=over +=over I<<< ln -s //quota2percent_ quota2percent_sdb1 >>> @@ -72,7 +72,7 @@ V17.0214 add example graph for Munin Plugin Gallery remove setting a PATH - remove German comments + remove German comments V17.0124 @@ -83,7 +83,7 @@ V17.0124 Jo Hartmann =head1 LICENSE - + GPLv2 (L) =cut @@ -111,7 +111,7 @@ GPLv2 (L) # Preparation section # ################################################### -# Load munin's shell libary +# Load munin's shell library . 
"$MUNIN_LIBDIR/plugins/plugin.sh" # if any fetch from munin-node file @@ -142,7 +142,7 @@ GPLv2 (L) ################################################### # Reading the quotes for the selected device, using repquota - if repquota "/dev/$Id" &> /dev/null; then + if repquota "/dev/$Id" &> /dev/null; then readarray Quotas < <( repquota "/dev/$Id" | grep " -- " ) else echo "No limitations administered via 'quota' for $Id" >&2 @@ -151,13 +151,13 @@ GPLv2 (L) # the avoidance of a divide-by-zero error Quotas[0]="root -- 1 1 1 1 1 1" - # no rrd file need + # no rrd file need Low_UID="never" fi readarray Totals < <( df "/dev/$Id" ) -# Get the count of Users +# Get the count of Users Users=${#Quotas[@]} @@ -167,18 +167,18 @@ GPLv2 (L) if [ "$1" = "config" ]; then - # Localisation of the graphic texts + # Localisation of the graphic texts case $Language in de) echo "graph_title Quota-Hard-Limit von $Id" echo "graph_vlabel Nutzung in % Hardlimit" - echo "graph_info Die Grafik zeigt die Belegung des durch Quota reglementierten Speicherplatzes für alle regulären Nutzer (UID >= $Min_UID) in Prozent des Hardlimits." + echo "graph_info Die Grafik zeigt die Belegung des durch Quota reglementierten Speicherplatzes für all regulären Nutzer (UID >= $Min_UID) in Prozent des Hardlimits." Total_txt="Su. aller Nutzer" Total_info="Inklusive Systemnutzer (UID < $Min_UID)" ;; - es) - echo "graph_title Cuota de límite absoluto de $Id" - echo "graph_vlabel el % de uso del límite duro" + es) + echo "graph_title Cuota de límite absoluto de $Id" + echo "graph_vlabel el % de uso del límite duro" echo "graph_info El gráfico muestra la disponibilidad de espacio regulado por cuotas para todos los usuarios regulares (UID >= $Min_UID) como porcentaje de límites duros." Total_txt="Suma de todos los usuarios " Total_info="La inclusión de usuario del sistema (UID < $Min_UID) " @@ -188,11 +188,11 @@ GPLv2 (L) echo "graph_vlabel Usage in %" echo "graph_info The graphic shows the allocation of the quota-regulated storage space for all regular users (UID >= $Min_UID) as a percentage of the hard limit ." 
Total_txt="all users" - Total_info="system users (UID < $Min_UID) included" + Total_info="system users (UID < $Min_UID) included" ;; esac - # Defaults configuration + # Defaults configuration echo "graph_category disk" echo "graph_args --lower-limit 0 --upper-limit 100" echo "graph_printf %5.2lf %%" @@ -208,17 +208,17 @@ GPLv2 (L) # Determine the currently processing UID Cur_UID="$(id -u "$User")" - # skip if actual user a system user und low_uid ist set to never + # skip if actual user a system user und low_uid is set to never [ "$Cur_UID" -lt "$Min_UID" ] && [ "$Low_UID" = "never" ] && continue - - # No graph for none human uid if low_uid ist set to no + + # No graph for none human uid if low_uid is set to no [ "$Cur_UID" -lt "$Min_UID" ] && echo "$Fieldname.graph $Low_UID" # configure the user lines echo "$Fieldname.label $User" echo "$Fieldname.warning $Warning" echo "$Fieldname.critical $Critical" - + done # configure the total line and send exit code NO ERROR happens @@ -238,15 +238,15 @@ GPLv2 (L) Quota=( ${Quotas[$i]} ) Fieldname="$(clean_fieldname "${Quota[0]}")" - # skip if actual user a system user und low_uid ist set to never + # skip if actual user a system user und low_uid is set to never [ "$Cur_UID" -lt "$Min_UID" ] && [ "$Low_UID" = "never" ] && continue - # write the result zu munin + # write the result zu munin echo "${Quota[2]} ${Quota[4]} $Fieldname.value" | awk '{printf "%s %f\n",$3,$1*100/$2}' done -# the value for the total line +# the value for the total line Total=( ${Totals[1]} ) echo "${Total[2]} ${Total[1]} total.value" | awk '{printf "%s %f\n",$3,$1*100/$2}' diff --git a/plugins/disk/raid b/plugins/disk/raid index 88f99dfe..c02a1689 100755 --- a/plugins/disk/raid +++ b/plugins/disk/raid @@ -1,11 +1,11 @@ #!/usr/bin/perl -w -# +# # (c) 2007 Nathan Rutman nathan@clusterfs.com -# -# Plugin to monitor RAID status +# +# Plugin to monitor RAID status # # Results are % of healthy drives in a raid device -# and % rebuilt of devices that are resyncing. +# and % rebuilt of devices that are resyncing. # #%# family=contrib #%# capabilities=autoconf @@ -13,11 +13,10 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if (-r "/proc/mdstat" and `grep md /proc/mdstat`) { print "yes\n"; - exit 0; } else { - print "no RAID devices\n"; - exit 1; + print "no (no RAID devices found)\n"; } + exit 0; } if ( $ARGV[0] and $ARGV[0] eq "config" ) { @@ -42,7 +41,7 @@ my($devinfo_re, $devstat_re, $action_re) = ( # Interestingly, swap is presented as "active (auto-read-only)" # and mdadm has '--readonly' option to make the array 'active (read-only)' -my($dev, $ro, $type, $members, $nmem, $nact, $status, $action, $proc, $minute); +my($dev, $ro, $type, $members, $failed, $nmem, $nact, $status, $action, $proc, $minute); while (@text) { my $line = shift @text; if ($line =~ /$devinfo_re/) { @@ -51,7 +50,10 @@ while (@text) { $ro = $2 || ''; $type = $3; $members = $4; - + $failed = $members; + $failed =~ s/[^F]+//g; + $failed = length($failed); + $line = shift @text; if ($line =~ /$devstat_re/) { # second line should like "123456 blocks super 1.2 [2/2] [UU]" @@ -63,7 +65,7 @@ while (@text) { # second line did not exist on /proc/mdstat next; } - + $line = shift @text; if ($line =~ /$action_re/) { # third line should like " [==>..................] 
check = 10.0% (12345/123456) finish=123min speed=12345/sec" @@ -104,6 +106,9 @@ while (@text) { print $dev, "_rebuild.critical 98:\n"; print $dev, "_check.label $dev check/resync \n"; print $dev, "_check.info $action $minute\n"; + print $dev, "_failed.label $dev failed disks \n"; + print $dev, "_failed.info $action $minute\n"; + print $dev, "_failed.critical 0:0\n"; } else { my $pct = 100 * $nact / $nmem; my $rpct = 100; @@ -127,6 +132,7 @@ while (@text) { print "$dev.value $pct\n"; print $dev, "_rebuild.value $rpct\n"; print $dev, "_check.value $cpct\n"; + print $dev, "_failed.value $failed\n"; } } diff --git a/plugins/disk/raid-mismatch-count b/plugins/disk/raid-mismatch-count index e84c1dd1..d51c8901 100755 --- a/plugins/disk/raid-mismatch-count +++ b/plugins/disk/raid-mismatch-count @@ -3,22 +3,22 @@ # Copyright (C) 2011 Rory Jaffe # derived from md_sync_speed by Kristian Lyngstøl # Copyright (C) 2010 Kristian Lyngstøl -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ##### -# +# # Magic Markers: # #%# family=auto # #%# capabilities=autoconf @@ -30,14 +30,12 @@ returnval=$? if [ "x$1" = "xautoconf" ]; then if [ -z "$targets" ]; then echo "no (no md devices found under /sys/devices/virtual/block/*/md/mismatch_cnt)" - exit 1; elif [ "x$returnval" != "x0" ]; then echo "no (discovery of md devices failed strangely)" - exit 1; else echo "yes" - exit 0 fi + exit 0 fi if [ "x$1" = "xconfig" ]; then diff --git a/plugins/disk/scsi_queue b/plugins/disk/scsi_queue index e1c14cf2..0cbf165b 100755 --- a/plugins/disk/scsi_queue +++ b/plugins/disk/scsi_queue @@ -1,7 +1,7 @@ #!/usr/bin/env python """ -Munin plugin which reports queue busy-values per online SCSI +Munin plugin which reports queue busy-values per online SCSI device on Linux, as seen in /proc/scsi/sg/devices If the busy-values often reach the queue depth of the device, @@ -11,7 +11,7 @@ plugin. Wildcard use: If your system has many SCSI-like devices, filtering may be needed to make the resulting graphs readable. -If you symlink the plugin, so that it's executed as +If you symlink the plugin, so that it's executed as scsi_queue_X_through_Y then the plugin will only look at devices /dev/sdX .. /dev/sdY @@ -27,7 +27,7 @@ X and Y are translated into a regular expression like: # Released according to the "New BSD License" AKA the 3-clause # BSD License: -# ==================================================================== +# ==================================================================== # Copyright (c) 2010, Danish National Board of Health. # All rights reserved. # @@ -52,7 +52,7 @@ X and Y are translated into a regular expression like: # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# ==================================================================== +# ==================================================================== # $Id: scsi_queue 13630 2010-08-31 15:29:14Z tra $ @@ -144,10 +144,10 @@ def map_procentries_to_devices(list_of_dicts,devfilter_regex): # 2:0:0:0 # 3:0:0:0 # - # The colon-separated values map to the first four parts + # The colon-separated values map to the first four parts # of /proc/scsi/sg/devices - # And the directory entries are symlinks which point to directories - # in /sys/devices. By following a symlink, we may end up in + # And the directory entries are symlinks which point to directories + # in /sys/devices. By following a symlink, we may end up in # a directory which contains directory entries like: # - block:sdb # ... @@ -162,7 +162,7 @@ def map_procentries_to_devices(list_of_dicts,devfilter_regex): # Search for dirent called block:SOMETHING # Put SOMETHING into blockdev_name - # Couldn't make glob.glob() work: The length of the result + # Couldn't make glob.glob() work: The length of the result # of glob() returned TypeError: len() of unsized object on # RHEL 5's python... dirents = os.listdir(sys_pathname) diff --git a/plugins/disk/snmp__areca_ b/plugins/disk/snmp__areca_ index dbb41454..e694fd79 100755 --- a/plugins/disk/snmp__areca_ +++ b/plugins/disk/snmp__areca_ @@ -189,7 +189,7 @@ def snmpwalk(root): # Decode SNMP response rsp.decode(answer) - + # Make sure response matches request (request IDs, communities, etc) if req != rsp: raise 'Unmatched response: %s vs %s' % (str(req), str(rsp)) diff --git a/plugins/disk/snmp__hp_temp b/plugins/disk/snmp__hp_temp index c943ef58..8716370e 100755 --- a/plugins/disk/snmp__hp_temp +++ b/plugins/disk/snmp__hp_temp @@ -4,7 +4,7 @@ # # Munin-plugin to monitor temperature on HP-servers. # Uses SNMP, and needs hpasmd. -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, diff --git a/plugins/disk/xfs b/plugins/disk/xfs index 7f32746c..55919038 100755 --- a/plugins/disk/xfs +++ b/plugins/disk/xfs @@ -20,13 +20,12 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") if (-r "/proc/fs/xfs/stat") { print "yes\n"; - exit 0; } else { - print "/proc/fs/xfs/stat not found\n"; - exit 1; + print "no (/proc/fs/xfs/stat not found)\n"; } + exit 0; } my %runtime_stats = ( @@ -94,7 +93,7 @@ while () { close(IN); foreach my $func ( keys %config ) { - print "multigraph xfs_$func\n"; + print "multigraph xfs_$func\n"; if (defined $ARGV[0] and $ARGV[0] eq 'config') { print "graph_title $config{$func}->{title}\n"; diff --git a/plugins/disk/xfs_frag b/plugins/disk/xfs_frag index 7218934a..cb3d9843 100755 --- a/plugins/disk/xfs_frag +++ b/plugins/disk/xfs_frag @@ -32,22 +32,22 @@ Paul Saunders L =cut -declare -a ARRY +declare -a TOKENS shopt -s nocasematch case $1 in config) - cat <<'EOF' + cat <<'EOF' graph_title XFS fragmentation graph_vlabel Percent graph_category disk EOF - cat /etc/mtab | awk '{print $2 " " $3}' | while read LINE - do - ARRY=($LINE) - if [[ ${ARRY[1]} =~ xfs ]]; then - FIELDNAME=$(echo ${ARRY[0]} | sed 's/^[^A-Za-z_]/_/; s/[^A-Za-z0-9_]/_/g') - echo "$FIELDNAME.label ${ARRY[0]}" + awk '{print $2 " " $3}' host. + +This wildcard plugin provides at the moment only the suffixes C, C, C, +C, C, C and C. 
+ +=head1 INSTALLATION + +- Copy this plugin into your munin plugins directory +- Install Python3 "docker" package + +=over 2 + + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_containers + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_cpu + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_images + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_memory + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_network + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_status + ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_volumes + +=back + +After the installation you need to restart your munin-node: + +=over 2 + + systemctl restart munin-node + +=back + +=head1 CONFIGURATION + +This plugin needs to run as root. You need to create a file named docker in the +directory /etc/munin/plugin-conf.d/ with the following config (you can also use +Docker environment variables here as described in +https://docs.docker.com/compose/reference/envvars/): + +You can use the EXCLUDE_CONTAINER_NAME environment variable to specify a regular expression; +containers whose names match it are excluded from the memory and cpu graphs. + +For example: + +env.EXCLUDE_CONTAINER_NAME runner + +Would exclude all containers with the word "runner" in their name. + + +=over 2 + + [docker_*] + user root + env.DOCKER_HOST unix://var/run/docker.sock + env.EXCLUDE_CONTAINER_NAME regexp + +=back + +=head1 AUTHORS + +This section has been reverse-engineered from git logs + +* Codimp : original rewrite +* Rowan Wookey : performance improvement +* Olivier Mehani : Network support, ClientWrapper, general + cleanup + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf suggest + +""" + +import os +import sys +import re +try: + from functools import cached_property +except ImportError: + # If cached_property is not available, + # just use the property decorator, without caching + # This is for backward compatibility with Python<3.8 + cached_property = property +from multiprocessing import Process, Queue + + +def sorted_by_creation_date(func): + def sorted_func(*args, **kwargs): + return sorted( + func(*args, **kwargs), + key=( + lambda x: x.attrs['CreatedAt'] + if 'CreatedAt' in x.attrs + else x.attrs['Created'] + ) + ) + return sorted_func + + +class ClientWrapper: + """ + A small wrapper for the docker client, to centralise some parsing logic, + and support caching. + + In addition, when the exclude_re parameter is not None, + any container whose name matches the RE is excluded from reports. 
+ """ + client = None + exclude = None + + def __init__(self, client, exclude_re=None): + self.client = client + if exclude_re: + self.exclude = re.compile(exclude_re) + + @cached_property + @sorted_by_creation_date + def containers(self): + return self.client.containers.list() + + @cached_property + @sorted_by_creation_date + def all_containers(self): + return [c for c in self.client.containers.list(all=True) + if not self.exclude + or not self.exclude.search(c.name)] + + @cached_property + @sorted_by_creation_date + def intermediate_images(self): + return list( + set(self.all_images) + .difference( + set(self.images) + .difference( + set(self.dangling_images) + ) + ) + ) + + @cached_property + @sorted_by_creation_date + def all_images(self): + return self.client.images.list(all=True) + + @cached_property + @sorted_by_creation_date + def images(self): + images = self.client.images.list() + return list( + set(images) + .difference( + set(self.dangling_images)) + ) + + @cached_property + @sorted_by_creation_date + def dangling_images(self): + return self.client.images.list(filters={'dangling': True}) + + @cached_property + @sorted_by_creation_date + def volumes(self): + return self.client.volumes.list() + + +def container_summary(container): + summary = container.name + attributes = container_attributes(container) + if attributes: + summary += f' ({attributes})' + return summary + + +def container_attributes(container): + attributes = container.image.tags + attributes.append(container.attrs['Created']) + return ', '.join(attributes) + + +def print_containers_status(client): + running = [] + paused = [] + created = [] + restarting = [] + removing = [] + exited = [] + dead = [] + for container in client.all_containers: + if container.status == 'running': + running.append(container) + elif container.status == 'paused': + paused.append(container) + elif container.status == 'created': + created.append(container) + elif container.status == 'restarting': + restarting.append(container) + elif container.status == 'removing': + removing.append(container) + elif container.status == 'exited': + exited.append(container) + elif container.status == 'dead': + dead.append(container) + print('running.value', len(running)) + print('running.extinfo', ', '.join(container_summary(c) for c in running)) + print('paused.value', len(paused)) + print('paused.extinfo', ', '.join(container_summary(c) for c in paused)) + print('created.value', len(created)) + print('created.extinfo', ', '.join(container_summary(c) for c in created)) + print('restarting.value', len(restarting)) + print('restarting.extinfo', ', '.join(container_summary(c) for c in restarting)) + print('removing.value', len(removing)) + print('removing.extinfo', ', '.join(container_summary(c) for c in removing)) + print('exited.value', len(exited)) + print('exited.extinfo', ', '.join(container_summary(c) for c in exited)) + print('dead.value', len(dead)) + print('dead.extinfo', ', '.join(container_summary(c) for c in dead)) + + +def image_summary(image): + attributes = image.tags + attributes.append(image.attrs['Created']) + attributes.append(f"{round(image.attrs['Size']/1024**2, 2)} MiB") + return f"{image.short_id} ({', '.join(attributes)})" + + +def print_images_count(client): + images = client.images + intermediate = client.intermediate_images + dangling = client.dangling_images + + print('intermediate_quantity.value', len(intermediate)) + print('intermediate_quantity.extinfo', ', '.join(image_summary(i) for i in intermediate)) + 
print('images_quantity.value', len(images)) + print('images_quantity.extinfo', ', '.join(image_summary(i) for i in images)) + print('dangling_quantity.value', len(dangling)) + print('dangling_quantity.extinfo', ', '.join(image_summary(i) for i in dangling)) + + +def get_container_stats(container, q): + q.put(container.stats(stream=False)) + + +def parallel_container_stats(client): + proc_list = [] + stats = {} + for container in client.containers: + q = Queue() + p = Process(target=get_container_stats, args=(container, q)) + proc_list.append({'proc': p, 'queue': q, 'container': container}) + p.start() + for proc in proc_list: + proc['proc'].join() + stats[proc['container']] = proc['queue'].get() + return stats.items() + + +def print_containers_cpu(client): + for container, stats in parallel_container_stats(client): + cpu_count = len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"]) + cpu_percent = 0.0 + cpu_delta = (float(stats["cpu_stats"]["cpu_usage"]["total_usage"]) + - float(stats["precpu_stats"]["cpu_usage"]["total_usage"])) + system_delta = (float(stats["cpu_stats"]["system_cpu_usage"]) + - float(stats["precpu_stats"]["system_cpu_usage"])) + if system_delta > 0.0: + cpu_percent = cpu_delta / system_delta * 100.0 * cpu_count + print(container.name + '.value', cpu_percent) + print(container.name + '.extinfo', container_attributes(container)) + + +def print_containers_memory(client): + for container, stats in parallel_container_stats(client): + print(container.name + '.value', stats['memory_stats']['stats']['total_rss']) + print(container.name + '.extinfo', container_attributes(container)) + + +def print_containers_network(client): + for container, stats in parallel_container_stats(client): + tx_bytes = 0 + rx_bytes = 0 + for data in stats['networks'].values(): + tx_bytes += data['tx_bytes'] + rx_bytes += data['rx_bytes'] + print(container.name + '_up.value', tx_bytes) + print(container.name + '_down.value', rx_bytes) + print(container.name + '.extinfo', container_attributes(container)) + + +def volume_summary(volume): + summary = f"{volume.short_id}" + if volume.attrs['Labels']: + summary += f" ({', '.join(volume.attrs['Labels'])})" + return summary + + +def main(): + try: + mode = sys.argv[1] + except IndexError: + mode = "" + wildcard = sys.argv[0].split("docker_")[1].split("_")[0] + + try: + import docker + client = docker.from_env() + if mode == "autoconf": + client.ping() + print('yes') + sys.exit(0) + except Exception as e: + print(f'no ({e})') + if mode == "autoconf": + sys.exit(0) + sys.exit(1) + + if mode == "suggest": + print("cpu") + print("images") + print("memory") + print("network") + print("status") + print("volumes") + sys.exit(0) + + client = ClientWrapper(client, + exclude_re=os.getenv('EXCLUDE_CONTAINER_NAME')) + + if wildcard == "status": + if mode == "config": + print("graph_title Docker status") + print("graph_vlabel containers") + print("graph_category virtualization") + print("graph_total All containers") + print("running.label RUNNING") + print("running.draw AREASTACK") + print("running.info Running containers can be manipulated with " + "`docker container [attach|kill|logs|pause|restart|stop] ` or " + "commands run in them with `docker container exec " + "[--detach|--interactive,--privileged,--tty] `" + ) + print("paused.label PAUSED") + print("paused.draw AREASTACK") + print("paused.info Paused containers can be resumed with " + "`docker container unpause `") + print("created.label CREATED") + print("created.draw AREASTACK") + print("created.info New containers 
can be created with " + "`docker container create --name ` or " + "`docker container run --name `") + print("restarting.label RESTARTING") + print("restarting.draw AREASTACK") + print("restarting.info Containers can be restarted with " + "`docker container restart `") + print("removing.label REMOVING") + print("removing.draw AREASTACK") + print("removing.info Containers can be removed with " + "`docker container rm `") + print("exited.label EXITED") + print("exited.draw AREASTACK") + print("exited.info Exited containers can be started with " + "`docker container start [--attach] `") + print("dead.label DEAD") + print("dead.draw AREASTACK") + print("dead.warning 1") + print("dead.info Dead containers can be started with " + "`docker container start `") + else: + print_containers_status(client) + elif wildcard == "containers": + if mode == "config": + print("graph_title Docker containers") + print("graph_vlabel containers") + print("graph_category virtualization") + print("containers_quantity.label Containers") + else: + print('containers_quantity.value', len(client.containers)) + elif wildcard == "images": + if mode == "config": + print("graph_title Docker images") + print("graph_vlabel images") + print("graph_category virtualization") + print("graph_total All images") + print("intermediate_quantity.label Intermediate images") + print("intermediate_quantity.draw AREASTACK") + print("intermediate_quantity.info All unused images can be deleted with " + "`docker image prune --all`") + print("images_quantity.label Images") + print("images_quantity.draw AREASTACK") + print("images_quantity.info Images can be used in containers with " + "`docker container create --name ` or " + "`docker container run --name `") + print("dangling_quantity.label Dangling images") + print("dangling_quantity.draw AREASTACK") + print("dangling_quantity.info Dangling images can be deleted with " + "`docker image prune`" + "or tagged with `docker image tag `") + print("dangling_quantity.warning 10") + else: + print_images_count(client) + elif wildcard == "volumes": + if mode == "config": + print("graph_title Docker volumes") + print("graph_vlabel volumes") + print("graph_category virtualization") + print("volumes_quantity.label Volumes") + print("volumes_quantity.draw AREASTACK") + print("volumes_quantity.info Unused volumes can be deleted with " + "`docker volume prune`") + else: + print('volumes_quantity.value', len(client.volumes)) + print('volumes_quantity.extinfo', ', '.join(volume_summary(v) for v in client.volumes)) + elif wildcard == "cpu": + if mode == "config": + graphlimit = str(os.cpu_count() * 100) + print("graph_title Docker containers CPU usage") + print("graph_args --base 1000 -r --lower-limit 0 --upper-limit " + graphlimit) + print("graph_scale no") + print("graph_period second") + print("graph_vlabel CPU usage (%)") + print("graph_category virtualization") + print("graph_info This graph shows docker container CPU usage.") + print("graph_total Total CPU usage") + for container in client.all_containers: + print("{}.label {}".format(container.name, container.name)) + print("{}.draw AREASTACK".format(container.name)) + print("{}.info {}".format(container.name, container_attributes(container))) + else: + print_containers_cpu(client) + elif wildcard == "memory": + if mode == "config": + print("graph_title Docker containers memory usage") + print("graph_args --base 1024 -l 0") + print("graph_vlabel Bytes") + print("graph_category virtualization") + print("graph_info This graph shows docker container memory 
usage.") + print("graph_total Total memory usage") + for container in client.all_containers: + print("{}.label {}".format(container.name, container.name)) + print("{}.draw AREASTACK".format(container.name)) + print("{}.info {}".format(container.name, container_attributes(container))) + else: + print_containers_memory(client) + elif wildcard == "network": + if mode == "config": + print("graph_title Docker containers network usage") + print("graph_args --base 1024 -l 0") + print("graph_vlabel bits in (-) / out (+) per ${graph_period}") + print("graph_category virtualization") + print("graph_info This graph shows docker container network usage.") + print("graph_total Total network usage") + for container in client.all_containers: + print("{}_down.label {}_received".format(container.name, container.name)) + print("{}_down.type DERIVE".format(container.name)) + print("{}_down.min 0".format(container.name)) + print("{}_down.graph no".format(container.name)) + print("{}_down.cdef {}_down,8,*".format(container.name, container.name)) + print("{}_up.label {}".format(container.name, container.name)) + print("{}_up.draw LINESTACK1".format(container.name)) + print("{}_up.type DERIVE".format(container.name)) + print("{}_up.min 0".format(container.name)) + print("{}_up.negative {}_down".format(container.name, container.name)) + print("{}_up.cdef {}_up,8,*".format(container.name, container.name)) + print("{}_up.info {}".format(container.name, container_attributes(container))) + else: + print_containers_network(client) + + +if __name__ == '__main__': + main() diff --git a/plugins/docker/docker_cpu b/plugins/docker/docker_cpu index e25b7e95..62c5d080 100755 --- a/plugins/docker/docker_cpu +++ b/plugins/docker/docker_cpu @@ -62,10 +62,13 @@ for my $i (1 .. $#containers) my @fields = split / +/, $containers[$i]; my $id = $fields[0]; my $name = $fields[$#fields]; + my $label = $name; # manage container name containing arithmetic operators and dots. E.g, my-container. $name =~ s/[-\+*\/\.]/_/g; # truncate container name with "," character. $name =~ s/,.*//g; + # prefix if container starts with 0-9 + $name =~ s/^([0-9])/c$1/; if (open(my $file, '<', "/sys/fs/cgroup/cpuacct/docker/$id/cpuacct.usage")) { my $total_cpu_ns = <$file>; @@ -75,20 +78,20 @@ for my $i (1 .. 
$#containers) { my @ncpu = split / /, <$file>; close $file; - push @result, {'name'=>$name, 'total_cpu_ns'=>$total_cpu_ns, 'ncpu'=>$#ncpu}; + push @result, {'name'=>$name, 'label'=>$label, 'total_cpu_ns'=>$total_cpu_ns, 'ncpu'=>$#ncpu}; } } } - + if (defined $ARGV[0] and $ARGV[0] eq "config") { my $nanoSecondsInSecond=1000000000; - my $graphlimit = $result[0]{'ncpu'}; + my $graphlimit = 1; foreach(@result){ - if ($$_{'ncpu'} > $graphlimit){ + if ($$_{'ncpu'} || 1 > $graphlimit){ $graphlimit = $$_{'ncpu'}; } - } + } $graphlimit = $graphlimit * 100; print "graph_title Docker container CPU usage\n"; print "graph_args --base 1000 -r --lower-limit 0 --upper-limit $graphlimit\n"; @@ -97,10 +100,10 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") print "graph_period second\n"; print "graph_category virtualization\n"; print "graph_info This graph shows docker container CPU usage.\n"; - + foreach(@result) { - print "$$_{'name'}.label $$_{'name'}\n"; + print "$$_{'name'}.label $$_{'label'}\n"; print "$$_{'name'}.draw LINE2\n"; print "$$_{'name'}.min 0\n"; print "$$_{'name'}.type DERIVE\n"; diff --git a/plugins/docker/docker_memory b/plugins/docker/docker_memory index 6e43c0b7..6fba71b4 100755 --- a/plugins/docker/docker_memory +++ b/plugins/docker/docker_memory @@ -81,7 +81,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") print "graph_vlabel Bytes\n"; print "graph_category virtualization\n"; print "graph_info This graph shows docker container memory usage.\n"; - + foreach(@result) { print "$$_{'name'}.label $$_{'name'}\n"; diff --git a/plugins/docker/example_graphs/docker_cpu_usage.png b/plugins/docker/example-graphs/docker_cpu-day.png similarity index 100% rename from plugins/docker/example_graphs/docker_cpu_usage.png rename to plugins/docker/example-graphs/docker_cpu-day.png diff --git a/plugins/docker/example_graphs/docker_memory_usage.png b/plugins/docker/example-graphs/docker_memory-day.png similarity index 100% rename from plugins/docker/example_graphs/docker_memory_usage.png rename to plugins/docker/example-graphs/docker_memory-day.png diff --git a/plugins/dovecot/dovecot b/plugins/dovecot/dovecot index 303b3ab1..363a1cf1 100755 --- a/plugins/dovecot/dovecot +++ b/plugins/dovecot/dovecot @@ -2,21 +2,22 @@ # # Munin Plugin # to count logins to your dovecot mailserver -# +# # Created by Dominik Schulz # http://developer.gauner.org/munin/ # Contributions by: # - Stephane Enten # - Steve Schnepp -# +# - pcy (make 'Connected Users' DERIVE, check existence of logfile in autoconf) +# # Parameters understood: # # config (required) # autoconf (optional - used by munin-config) -# +# # Config variables: # -# logfile - Where to find the syslog file +# logfile - Where to find the syslog file # # Add the following line to a file in /etc/munin/plugin-conf.d: # env.logfile /var/log/your/logfile.log @@ -34,7 +35,7 @@ LOGFILE=${logfile:-/var/log/mail.log} ###################### if [ "$1" = "autoconf" ]; then - echo yes + [ -f "$LOGFILE" ] && echo yes || echo "no (logfile $LOGFILE not found)" exit 0 fi @@ -53,6 +54,7 @@ if [ "$1" = "config" ]; then done echo 'connected.label Connected Users' + echo "connected.type DERIVE" exit 0 fi @@ -86,7 +88,7 @@ echo -n echo -en "login_tls.value " VALUE=$(egrep -c '[dovecot]?.*Login.*TLS' $LOGFILE) if [ ! -z "$VALUE" ]; then - echo "$VALUE" + echo "$VALUE" else echo "0" fi @@ -97,7 +99,7 @@ echo -n echo -en "login_ssl.value " VALUE=$(egrep -c '[dovecot]?.*Login.*SSL' $LOGFILE) if [ ! 
-z "$VALUE" ]; then - echo "$VALUE" + echo "$VALUE" else echo "0" fi @@ -108,7 +110,7 @@ echo -n echo -en "login_imap.value " VALUE=$(egrep -c '[dovecot]?.*imap.*Login' $LOGFILE) if [ ! -z "$VALUE" ]; then - echo "$VALUE" + echo "$VALUE" else echo "0" fi @@ -119,8 +121,9 @@ echo -n echo -en "login_pop3.value " VALUE=$(egrep -c '[dovecot]?.*pop3.*Login' $LOGFILE) if [ ! -z "$VALUE" ]; then - echo "$VALUE" + echo "$VALUE" else echo "0" fi echo -n + diff --git a/plugins/dovecot/dovecot.rej b/plugins/dovecot/dovecot.rej deleted file mode 100644 index 6df1087a..00000000 --- a/plugins/dovecot/dovecot.rej +++ /dev/null @@ -1,10 +0,0 @@ -diff a/plugins/dovecot/dovecot b/plugins/dovecot/dovecot (rejected hunks) -@@ -23,7 +23,7 @@ $aborted = 0; - - $logfile = $ENV{'LOGFILE'} || '/var/log/mail.log'; - --# Use an overridden $PATH for all external programs if needed -+# Use an overrided $PATH for all external programs if needed - $DOVEADM = "doveadm"; - - if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { diff --git a/plugins/dovecot/dovecot1 b/plugins/dovecot/dovecot1 index af73b414..83f4d897 100755 --- a/plugins/dovecot/dovecot1 +++ b/plugins/dovecot/dovecot1 @@ -29,19 +29,19 @@ if ( $logfile =~ /dovecot/ ) { $dovelogfile = 1 ; } -# Use an overrided $PATH for all external programs if needed +# Use an overridden $PATH for all external programs if needed $DOVEADM = "doveadm"; if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { if (! -x $DOVEADM) { print "no (no doveadm)\n"; - exit(1); + exit(0); } if (! -f $logfile) { print "no (logfile $logfile does not exist)\n"; - exit(1); + exit(0); } if (-r "$logfile") { @@ -50,7 +50,7 @@ if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { } else { print "no (logfile not readable)\n"; } - exit 1; + exit 0; } if (-f "$logfile.0") { @@ -152,7 +152,7 @@ if (! -f $logfile and ! -f $rotlogfile) { exit 0; } -# dit kan beter maar twee calls zijn toch nodig als we niet zelf aggegreren +# dit kan beter maar twee calls zijn toch nodig also we niet zelf aggegreren # suggestie: doveadm who -1 | awk '{print $1" "$2" "$4}' | sort | uniq -c $connectedimap = `$DOVEADM -f flow who | grep imap | wc -l`; $connectedpop3 = `$DOVEADM -f flow who | grep pop3 | wc -l`; diff --git a/plugins/dovecot/dovecot_stats_ b/plugins/dovecot/dovecot_stats_ old mode 100644 new mode 100755 diff --git a/plugins/drbd/drbd b/plugins/drbd/drbd index 4216902e..49644f9d 100755 --- a/plugins/drbd/drbd +++ b/plugins/drbd/drbd @@ -96,7 +96,7 @@ open (IN, $file ) || die "Could not open $file for reading: $!"; my ($changed) = $_ =~ /changed:(\d*)/; $store->{ $temp }->{'act_log'}->{changed} = $changed if $changed ne undef; } - + my ($ns) = $_ =~ /ns:(\d*)/; $store->{ $temp }->{'ns'} = $ns if $ns ne undef; diff --git a/plugins/drbd/drbd-stat b/plugins/drbd/drbd-stat index 2a6e7259..16e0cae4 100755 --- a/plugins/drbd/drbd-stat +++ b/plugins/drbd/drbd-stat @@ -85,7 +85,7 @@ open (IN, $file ) || die "Could not open $file for reading: $!"; my ($changed) = $_ =~ /changed:(\d*)/; $store->{ $temp }->{'act_log'}->{changed} = $changed if $changed ne undef; } - + my ($ns) = $_ =~ /ns:(\d*)/; $store->{ $temp }->{'ns'} = $ns if $ns ne undef; diff --git a/plugins/drupal/drupal_page_speed_test.php b/plugins/drupal/drupal_page_speed_test.php old mode 100644 new mode 100755 diff --git a/plugins/dspam/dspam_ b/plugins/dspam/dspam_ index 78e0f390..5f7de09a 100755 --- a/plugins/dspam/dspam_ +++ b/plugins/dspam/dspam_ @@ -71,7 +71,7 @@ should be in the format: dspam__, where: a total of all uids, or for a list of all uids (depending on the graph type). 
NB For advanced uid selection such as 'all users of domain - example.org', please see the environment variable 'pattern' + example.org', please see the environment variable 'pattern' under CONFIGURATION. =head1 INTERPRETATION @@ -89,7 +89,7 @@ The plugin supports the following graph types: display the increase of processed messages over time. relprocessed - Shows the same data as dspam_absprocessed_, but as - messages per minute instead of ever-growing asolute + messages per minute instead of ever-growing absolute values. processed - Shows the same data as dspam_absprocessed_, but as diff --git a/plugins/dvb/2wcomdsr_ b/plugins/dvb/2wcomdsr_ old mode 100644 new mode 100755 index 86c43265..273d1cc5 --- a/plugins/dvb/2wcomdsr_ +++ b/plugins/dvb/2wcomdsr_ @@ -2,7 +2,7 @@ ################################################################################################### # -# Multigraph munin plugin to monitor 2wcom DSR01/DSR02 Professional Audio Satellite-Receivers +# Multigraph munin plugin to monitor 2wcom DSR01/DSR02 Professional Audio Satellite-Receivers # through their web interface status page. # # To use this plugin, copy it to the munin's plugin directory (eg. /usr/share/munin/plugins) @@ -140,7 +140,7 @@ echo "graph_info Detected bit error rate (BER) before the Reed Solomon error cor echo "ber_rs.label RS/BCH BER" echo "ber_rs.critical 0.0002" echo "ber_rs.info BER before Reed Solomon error correction" -echo +echo exit 0;; esac diff --git a/plugins/dvb/femon b/plugins/dvb/femon index eebd6661..7d64e106 100755 --- a/plugins/dvb/femon +++ b/plugins/dvb/femon @@ -2,8 +2,8 @@ # -*- encoding: iso-8859-1 -*- # # Wildcard-plugin to monitor DVB signal information via femon command line utility, -# -# To monitor a dvb device, link femon_ to this file. +# +# To monitor a dvb device, link femon_ to this file. # E.g. # ln -s /usr/share/munin/plugins/femon_ /etc/munin/plugins/femon_adapter0 # ...will monitor /dev/dvb/adapter0. @@ -15,13 +15,13 @@ # Parameters # femonpath - Specify path to femon program (Default: /usr/bin/femon) # graph_args - Specify graph args (Default: --lower-limit 0 --upper-limit 100 --rigid) -# +# # Author: Nicolas Knotzer -# +# # v1.0 02/10/2011 # v1.1 20/10/2011 - Prints OSError.strerror in verbose mode, uses rsplit instead of split for parsing femon output # v1.2 21/10/2011 - Uses subprocess.Popen instead of subprocess.check_output for better compatibility with older python versions (i.e. works with python version >= 2.4) -# v1.3 25/10/2011 - Configure upper and lower graph limits with graph_args environment variable. +# v1.3 25/10/2011 - Configure upper and lower graph limits with graph_args environment variable. # # Copyright (c) 2011 Nicolas Knotzer. 
# @@ -70,7 +70,7 @@ def find_dvb_adapters() : adapters.append(adapter) except : continue - except : + except : verboselog('Failed to list adapters in /dev/dvb') return(adapters) @@ -78,7 +78,7 @@ def get_dvb_adapter_name() : global plugin_name try : name=[plugin_name[string.rindex(plugin_name,'_')+1:]] - + # Check that the adapter exists in /dev/dvb if not os.path.exists('/dev/dvb/'+name[0]): verboselog('/dev/dvb/'+name[0]+' not found!') @@ -93,7 +93,7 @@ def print_adapter_config(dvb_adapter) : print ('graph_title DVB Femon Sensors '+dvb_adapter[0]) print ('graph_args '+os.getenv('graph_args','--lower-limit 0 --upper-limit 100 --rigid')) print ('graph_vlabel Quality') - print ('graph_category other') + print ('graph_category tv') print ('graph_info This graph shows femon output for your dvb-'+dvb_adapter[0]) print ('str.label Signal Strength') @@ -117,7 +117,7 @@ def print_dvb_adapter_values(dvb_adapter) : try : verboselog('Reading values from '+dvb_adapter[0]) mypipe = subprocess.Popen([os.getenv('femonpath','/usr/bin/femon'), '-H', '-c 1', '-a '+dvb_adapter[0].replace('adapter','')], stdout=subprocess.PIPE) - femon_output = mypipe.communicate()[0] + femon_output = mypipe.communicate()[0] verboselog(femon_output) except OSError, e: verboselog('Cannot access femon values! Check user rights or proper femon installation.') @@ -145,13 +145,13 @@ if len(sys.argv)>1 : print_adapter_config (dvb_adapter) sys.exit(0) elif sys.argv[1]=="autoconf" : - if os.path.exists(os.getenv('femonpath','/usr/bin/femon')) : + if os.path.exists(os.getenv('femonpath','/usr/bin/femon')) : if not find_dvb_adapters(): print('no (no dvb adapters accessible)') else : print('yes') sys.exit(0) - else : + else : print('no (femon not found)') sys.exit(0) elif sys.argv[1]=="suggest" : @@ -179,16 +179,16 @@ femon_ - Munin wildcard-plugin to monitor dvb signal information attribute value =head1 APPLICABLE SYSTEMS -Node with B interpreter and B +Node with B interpreter and B installed and in function. =head1 CONFIGURATION - + =head2 Create link in service directory To monitor a dvb device, create a link in the service directory of the munin-node named femon_, which is pointing to this file. - + E.g. ln -s /usr/share/munin/plugins/femon_ /etc/munin/plugins/femon_adapter0 @@ -211,7 +211,7 @@ So following minimal configuration in plugin-conf.d/munin-node is needed. =head2 Set Parameter if needed femonpath - Specify path to femon program (Default: /usr/bin/femon) - + =head1 INTERPRETATION diff --git a/plugins/ejabberd/ejabberd_resources_ b/plugins/ejabberd/ejabberd_resources_ index d820b2de..37196951 100755 --- a/plugins/ejabberd/ejabberd_resources_ +++ b/plugins/ejabberd/ejabberd_resources_ @@ -27,8 +27,8 @@ #%# capabilities=autoconf suggest EJABBERDCTL_CFG=${ejabberdctl_cfg:-/etc/ejabberd/ejabberdctl.cfg} -source $EJABBERDCTL_CFG 2>/dev/null -source $MUNIN_LIBDIR/plugins/plugin.sh +. "$EJABBERDCTL_CFG" 2>/dev/null +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" EJABBERDCTL=${ejabberdctl:-$(which ejabberdctl)} @@ -45,7 +45,7 @@ function ejabberd_exec() { echo "$1" | su - ejabberd -c "$ERL_CALL -e -n $ERLANG_NODE" | sed -re 's/^\{ok, (.*)\}$/\1/' } -SCRIPT_NAME=$(basename $0) +SCRIPT_NAME=$(basename "$0") RESOURCE_TYPE="${SCRIPT_NAME/ejabberd_resources_/}" RESOURCE_BASE=1000 [ "$RESOURCE_TYPE" = "memory" ] && RESOURCE_BASE=1024 @@ -57,8 +57,10 @@ function hosts_list() { function ejabberd_report_online_users() { [ "$1" = "config" ] && echo 'graph_vlabel users' for host in $(hosts_list); do - local clean_host=$(clean_fieldname $host) - local ejabberd_command="length(ejabberd_sm:get_vh_session_list(\"$host\"))" + local clean_host + local ejabberd_command + clean_host=$(clean_fieldname "$host") + ejabberd_command="length(ejabberd_sm:get_vh_session_list(\"$host\"))" if [ "$1" = "config" ]; then cat < 0 } +DEFAULT_CACHE = { start: 0 }.freeze $debug_mode = ARGV.first == 'debug' if $debug_mode - log_info = DEFAULT_CACHE + log_info = DEFAULT_CACHE else - begin - log_info = YAML.load IO.read(CACHE_FILE) - rescue - log_info = DEFAULT_CACHE - end + begin + log_info = YAML.load IO.read(CACHE_FILE) + rescue StandardError + log_info = DEFAULT_CACHE + end - if File.size(LOG_FILE) < log_info[:start] - # logrotate? - log_info = DEFAULT_CACHE - end + if File.size(LOG_FILE) < log_info[:start] + # logrotate? + log_info = DEFAULT_CACHE + end end if ARGV.first == 'reset' - log_info = { :start => File.size(LOG_FILE)-1 } - puts 'Log reset' + log_info = { start: File.size(LOG_FILE) - 1 } + puts 'Log reset' end new_data = '' File.open(LOG_FILE, 'rb') do |flog| - flog.seek(log_info[:start]) - new_data = flog.read + flog.seek(log_info[:start]) + new_data = flog.read end KNOWN_LOG_TYPES = [ - # each element is an instance of Array. 1st item: error description, others: text to search log for - ['EJAB-1482 Crash when waiting for item', - ['wait_for_']], - ['EJAB-1483 ODBC sup failure (wrong PID?)', - ['ejabberd_odbc_sup']], - ['EJAB-1483 ODBC sup wrong PID failure echo', - ["mod_pubsub_odbc,'-unsubscribe"]], - ['DNS failure', - ['You should check your DNS configuration']], - ['Database unavailable/too slow', - ['Database was not available or too slow']], - ['State machine terminated: timeout', - ['State machine', - 'terminating', - 'Reason for', - 'timeout']], - ['The auth module returned an error', - ['The authentication module', - 'returned an error']], - ['MySQL disconnected', - ['mysql', - 'Received unknown signal, exiting']], - ['Connecting to MySQL: failed', - ['mysql', - 'Failed connecting to']], - ['Timeout while running a hook', - ['ejabberd_hooks', - 'timeout']], - ['SQL transaction restarts exceeded', - ['SQL transaction restarts exceeded']], - ['Unexpected info', - ['nexpected info']], - ['Other sql_cmd timeout', - ['sql_cmd']], - ['System limit hit: ports', # check with length(erlang:ports())., set in ejabberdctl config file - ['system_limit', - 'open_port']], - ['Other system limit hit', # processes? 
check with erlang:system_info(process_count)., erlang:system_info(process_limit)., set in ejabberdctl cfg - ['system_limit']], - ['Generic server terminating', - ['Generic server', - 'terminating']], - ['Mnesia table shrinked', - ['shrinking table']], - ['Admin access failed', - ['Access of', - 'failed with error']], - ['MySQL sock timedout', - ['mysql_', - ': Socket', - 'timedout']], - ['Configuration error', - ['{badrecord,config}']], - ['Strange vCard error (vhost)', - ['error found when trying to get the vCard']], - ['Mnesia is overloaded', - ['Mnesia is overloaded']], - ['MySQL: init failed recv data', - ['mysql_conn: init failed receiving data']], - ['TCP Error', - ['Failed TCP']] -] + # each element is an instance of Array. 1st item: error description, others: text to search log for + ['EJAB-1482 Crash when waiting for item', + ['wait_for_']], + ['EJAB-1483 ODBC sup failure (wrong PID?)', + ['ejabberd_odbc_sup']], + ['EJAB-1483 ODBC sup wrong PID failure echo', + ["mod_pubsub_odbc,'-unsubscribe"]], + ['DNS failure', + ['You should check your DNS configuration']], + ['Database unavailable/too slow', + ['Database was not available or too slow']], + ['State machine terminated: timeout', + ['State machine', + 'terminating', + 'Reason for', + 'timeout']], + ['The auth module returned an error', + ['The authentication module', + 'returned an error']], + ['MySQL disconnected', + ['mysql', + 'Received unknown signal, exiting']], + ['Connecting to MySQL: failed', + ['mysql', + 'Failed connecting to']], + ['Timeout while running a hook', + %w[ejabberd_hooks + timeout]], + ['SQL transaction restarts exceeded', + ['SQL transaction restarts exceeded']], + ['Unexpected info', + ['nexpected info']], + ['Other sql_cmd timeout', + ['sql_cmd']], + ['System limit hit: ports', # check with length(erlang:ports())., set in ejabberdctl config file + %w[system_limit + open_port]], + ['Other system limit hit', # processes? check with erlang:system_info(process_count)., erlang:system_info(process_limit)., set in ejabberdctl cfg + ['system_limit']], + ['Generic server terminating', + ['Generic server', + 'terminating']], + ['Mnesia table shrinked', + ['shrinking table']], + ['Admin access failed', + ['Access of', + 'failed with error']], + ['MySQL sock timedout', + ['mysql_', + ': Socket', + 'timedout']], + ['Configuration error', + ['{badrecord,config}']], + ['Strange vCard error (vhost)', + ['error found when trying to get the vCard']], + ['Mnesia is overloaded', + ['Mnesia is overloaded']], + ['MySQL: init failed recv data', + ['mysql_conn: init failed receiving data']], + ['TCP Error', + ['Failed TCP']] +].freeze def log_type(text) - KNOWN_LOG_TYPES.find_index { |entry| - entry[1].all? { |substr| text.include? substr } - } + KNOWN_LOG_TYPES.find_index do |entry| + entry[1].all? { |substr| text.include? substr } + end end -new_data.split("\n=").each { |report| - next if report.empty? - report =~ /\A(\w+) REPORT==== (.*) ===\n(.*)\z/m - type, time, text = $1, $2, $3 - next unless type and time and text +new_data.split("\n=").each do |report| + next if report.empty? 
- log_info[type] = (log_info[type] || 0) + 1 - if sub_type = log_type(text) - log_info[sub_type] = (log_info[sub_type] || 0) + 1 - elsif $debug_mode - warn "Unparsed log entry #{type}: #{text} at #{time}" - end -} + report =~ /\A(\w+) REPORT==== (.*) ===\n(.*)\z/m + type = Regexp.last_match(1) + time = Regexp.last_match(2) + text = Regexp.last_match(3) + next unless type && time && text + + log_info[type] = (log_info[type] || 0) + 1 + if sub_type = log_type(text) + log_info[sub_type] = (log_info[sub_type] || 0) + 1 + elsif $debug_mode + warn "Unparsed log entry #{type}: #{text} at #{time}" + end +end log_info[:start] += new_data.size File.open(CACHE_FILE, 'w') { |f| f.write log_info.to_yaml } unless $debug_mode if ARGV.first == 'config' - puts <, where is any - arbitrary name of your storage system. The plugin will return in its + + Make a link from /usr/share/munin/plugins/emc_vnx_dm_basic_stats to + /etc/munin/plugins/emc_vnx_dm_basic_stats_, where is any + arbitrary name of your storage system. The plugin will return in its answer as "host_name" field. - + For example, assume your storage system is called "VNX5300". - Make a configuration file at + Make a configuration file at /etc/munin/plugin-conf.d/emc_vnx_block_lun_perfdata_VNX5300. For example: - + [emc_vnx_block_lun_perfdata_VNX5300] - user munin + user munin env.username operator1 - env.cs_addr 192.168.1.1 192.168.1.2 + env.cs_addr 192.168.1.1 192.168.1.2 or: [emc_vnx_block_lun_perfdata_VNX5300] - user munin + user munin env.username operator1 env.localcli /opt/Navisphere/bin/naviseccli env.sp_addr 192.168.0.3 192.168.0.4 env.blockpw foobar - Where: + Where: user - SSH Client local user env.username - Remote user with Operator role for Block or File part env.cs_addr - Control Stations addresses for remote (indirect) access. - env.localcli - Optional. Path of localhost 'Naviseccli' binary. If this + env.localcli - Optional. Path of localhost 'Naviseccli' binary. If this variable is set, env.cs_addr is ignored, and local 'navicli' is used. Requires env.blockpw variable. - env.sp_addr - Default is "SPA SPB". In case of "direct" connection to + env.sp_addr - Default is "SPA SPB". In case of "direct" connection to Storage Processors, their addresses/hostnames are written here. env.blockpw - Password for connecting to Storage Processors =head1 ERRATA - It counts Queue Length in not fully correct way. We take parameters totally + It counts Queue Length in not fully correct way. We take parameters totally from both SP's, but after we divide them independently by load of SPA and SPB. Anyway, in most AAA / ALUA cases the formula is correct. @@ -165,7 +165,7 @@ else NAVICLI="/nas/sbin/navicli" fi -# Prints "10" on stdout if found Primary Online control station. "11" - for Secondary Online control station. +# Prints "10" on stdout if found Primary Online control station. "11" - for Secondary Online control station. ssh_check_cmd() { ssh -q "$username@$1" "/nasmcd/sbin/getreason | grep -w \"slot_\$(/nasmcd/sbin/t2slot)\" | cut -d- -f1 | awk '{print \$1}' " } @@ -182,7 +182,7 @@ check_conf_and_set_vars () { echo "No control station addresses ('cs_addr' environment variable)!" return 1 fi - #Choosing Cotrol Station. Code have to be "10" + #Choosing Control Station. 
Code have to be "10" for CS in $cs_addr; do if [[ "10" = "$(ssh_check_cmd "$CS")" ]]; then PRIMARY_CS=$CS @@ -253,7 +253,7 @@ echo "host_name ${TARGET}" echo if [ "$1" = "config" ] ; then - cat <<-EOF + cat <<-EOF multigraph emc_vnx_block_blocks graph_category disk graph_title EMC VNX 5300 LUN Blocks @@ -263,7 +263,7 @@ if [ "$1" = "config" ] ; then while read -r LUN ; do LUN="$(clean_fieldname "$LUN")" - cat <<-EOF + cat <<-EOF ${LUN}_read.label none ${LUN}_read.graph no ${LUN}_read.min 0 @@ -304,8 +304,8 @@ if [ "$1" = "config" ] ; then multigraph emc_vnx_block_ticks graph_category disk graph_title EMC VNX 5300 Counted Load per LUN - graph_vlabel Load, % * Number of LUNs - graph_args --base 1000 -l 0 -r + graph_vlabel Load, % * Number of LUNs + graph_args --base 1000 -l 0 -r EOF echo -n "graph_order " while read -r LUN ; do @@ -332,7 +332,7 @@ if [ "$1" = "config" ] ; then ${LUN}_idleticks_spb.label $LUN Idle Ticks SPB ${LUN}_idleticks_spb.type COUNTER ${LUN}_idleticks_spb.graph no - ${LUN}_load_spa.label $LUN load SPA + ${LUN}_load_spa.label $LUN load SPA ${LUN}_load_spa.draw AREASTACK ${LUN}_load_spb.label $LUN load SPB ${LUN}_load_spb.draw AREASTACK @@ -342,7 +342,7 @@ if [ "$1" = "config" ] ; then done <<< "$LUNLIST" cat <<-EOF - + multigraph emc_vnx_block_outstanding graph_category disk graph_title EMC VNX 5300 Sum of Outstanding Requests @@ -351,14 +351,14 @@ if [ "$1" = "config" ] ; then EOF while read -r LUN ; do LUN="$(clean_fieldname "$LUN")" - cat <<-EOF + cat <<-EOF ${LUN}_outstandsum.label $LUN ${LUN}_outstandsum.type COUNTER EOF done <<< "$LUNLIST" cat <<-EOF - + multigraph emc_vnx_block_nonzeroreq graph_category disk graph_title EMC VNX 5300 Non-Zero Request Count Arrivals @@ -392,7 +392,7 @@ if [ "$1" = "config" ] ; then multigraph emc_vnx_block_queue graph_category disk - graph_title EMC VNX 5300 Counted Block Queue Length + graph_title EMC VNX 5300 Counted Block Queue Length graph_vlabel Length EOF while read -r LUN ; do @@ -451,10 +451,10 @@ if [ "$1" = "config" ] ; then cat <<-EOF ${SPclean}_total_busyticks.label ${SP} ${SPclean}_total_busyticks.graph no - ${SPclean}_total_busyticks.type COUNTER + ${SPclean}_total_busyticks.type COUNTER ${SPclean}_total_bt.label ${SP} ${SPclean}_total_bt.graph no - ${SPclean}_total_bt.type COUNTER + ${SPclean}_total_bt.type COUNTER ${SPclean}_total_idleticks.label ${SP} ${SPclean}_total_idleticks.graph no ${SPclean}_total_idleticks.type COUNTER @@ -469,8 +469,8 @@ fi #BIGCMD="$SSH" while read -r LUN ; do FILTERLUN="$(clean_fieldname "$LUN")" - BIGCMD+="$NAVICLI lun -list -name $LUN -perfData | - sed -ne 's/^Blocks Read\:\ */${FILTERLUN}_read.value /p; + BIGCMD+="$NAVICLI lun -list -name $LUN -perfData | + sed -ne 's/^Blocks Read\:\ */${FILTERLUN}_read.value /p; s/^Blocks Written\:\ */${FILTERLUN}_write.value /p; s/Read Requests\:\ */${FILTERLUN}_readreq.value /p; s/Write Requests\:\ */${FILTERLUN}_writereq.value /p; diff --git a/plugins/emc/emc_vnx_file_ b/plugins/emc/emc_vnx_file_ index ed5271a4..c18b5b2b 100755 --- a/plugins/emc/emc_vnx_file_ +++ b/plugins/emc/emc_vnx_file_ @@ -2,9 +2,9 @@ : <<=cut -=head1 NAME +=head1 NAME - emc_vnx_file_stats - Plugin to monitor Basic, NFSv3 and NFSv4 statistics of + emc_vnx_file_stats - Plugin to monitor Basic, NFSv3 and NFSv4 statistics of EMC VNX 5300 Unified Storage system's Datamovers =head1 AUTHOR @@ -22,24 +22,24 @@ =head1 DESCRIPTION - The plugin monitors basic statistics of EMC Unified Storage system Datamovers - and NFS statistics of EMC VNX5300 Unified Storage system. 
Probably it can + The plugin monitors basic statistics of EMC Unified Storage system Datamovers + and NFS statistics of EMC VNX5300 Unified Storage system. Probably it can also be compatible with other Isilon or Celerra systems. It uses SSH to connect - to Control Stations, then remotely executes '/nas/sbin/server_stats' and - fetches and parses data from it. It supports gathering data both from - active/active and active/passive Datamover configurations, ignoring offline or - standby Datamovers. + to Control Stations, then remotely executes '/nas/sbin/server_stats' and + fetches and parses data from it. It supports gathering data both from + active/active and active/passive Datamover configurations, ignoring offline or + standby Datamovers. If all Datamovers are offline or absent, the plugin returns error. This plugin also automatically chooses Primary Control Station from the list by calling '/nasmcd/sbin/getreason' and '/nasmcd/sbin/t2slot'. - + At the moment data is gathered from the following statistics sources: * nfs.v3.op - Tons of timings about NFSv3 RPC calls * nfs.v4.op - Tons of timings about NFSv4 RPC calls * nfs.client - Here new Client addresses are rescanned and added automatically. * basic-std Statistics Group - Basic Statistics of Datamovers (eg. CPU, Memory etc.) - + It's quite easy to comment out unneeded data to make graphs less overloaded or to add new statistics sources. @@ -61,7 +61,7 @@ EMC VNX 5300 FileResolve These are NFS (v3,v4) Graphs. - Graph category NFS: + Graph category FS: EMC VNX 5300 NFSv3 Calls per second EMC VNX 5300 NFSv3 uSeconds per call EMC VNX 5300 NFSv3 Op % @@ -78,48 +78,48 @@ =head1 COMPATIBILITY - The plugin has been written for being compatible with EMC VNX5300 Storage + The plugin has been written for being compatible with EMC VNX5300 Storage system, as this is the only EMC storage which i have. By the way, i am pretty sure it can also work with other VNX1 storages, like VNX5100 and VNX5500. - About VNX2 series, i don't know whether the plugin will be able to work with + About VNX2 series, i don't know whether the plugin will be able to work with them. Maybe it would need some corrections in command-line backend. The same - situation is with other EMC systems, so i encourage you to try and fix the - plugin. + situation is with other EMC systems, so i encourage you to try and fix the + plugin. =head1 CONFIGURATION - The plugin uses SSH to connect to Control Stations. It's possible to use + The plugin uses SSH to connect to Control Stations. It's possible to use 'nasadmin' user, but it would be better if you create read-only global user by Unisphere Client. The user should have only Operator role. I created "operator" user but due to the fact that Control Stations already - had one internal "operator" user, the new one was called "operator1". So be + had one internal "operator" user, the new one was called "operator1". So be careful. After that, copy .bash_profile from /home/nasadmin to a newly created /home/operator1 - + On munin-node side choose a user which will be used to connect through SSH. - Generally user "munin" is ok. Then, execute "sudo su munin -s /bin/bash", - "ssh-keygen" and "ssh-copy-id" to both Control Stations with newly created + Generally user "munin" is ok. Then, execute "sudo su munin -s /bin/bash", + "ssh-keygen" and "ssh-copy-id" to both Control Stations with newly created user. - - Make a link from /usr/share/munin/plugins/emc_vnx_file_stats to - /etc/munin/plugins/. 
If you want to get NFS statistics, name the link as + + Make a link from /usr/share/munin/plugins/emc_vnx_file_stats to + /etc/munin/plugins/. If you want to get NFS statistics, name the link as "emc_vnx_file_nfs_stats_", otherwise to get Basic Datamover statistics you have to name it "emc_vnx_file_basicdm_stats_", where is any - arbitrary name of your storage system. The plugin will return in its + arbitrary name of your storage system. The plugin will return in its answer as "host_name" field. For example, assume your storage system is called "VNX5300". - Make a configuration file at + Make a configuration file at /etc/munin/plugin-conf.d/emc_vnx_file_stats_VNX5300 - - [emc_vnx_file_*] - user munin - env.username operator1 - env.cs_addr 192.168.1.1 192.168.1.2 - env.nas_servers server_2 server_3 - Where: + [emc_vnx_file_*] + user munin + env.username operator1 + env.cs_addr 192.168.1.1 192.168.1.2 + env.nas_servers server_2 server_3 + + Where: user - SSH Client local user env.username - Remote user with Operator role env.cs_addr - Control Stations addresses @@ -143,7 +143,7 @@ cs_addr=${cs_addr:=""} username=${username:=""} nas_servers=${nas_servers:="server_2 server_3"} -# Prints "10" on stdout if found Primary Online control station. "11" - for Secondary Online control station. +# Prints "10" on stdout if found Primary Online control station. "11" - for Secondary Online control station. ssh_check_cmd() { ssh -q "$username@$1" "/nasmcd/sbin/getreason | grep -w \"slot_\$(/nasmcd/sbin/t2slot)\" | cut -d- -f1 | awk '{print \$1}' " @@ -160,7 +160,7 @@ check_conf () { return 1 fi - #Choosing Cotrol Station. Code have to be "10" + #Choosing Control Station. Code have to be "10" for CS in $cs_addr; do if [[ "10" = "$(ssh_check_cmd "$CS")" ]]; then PRIMARY_CS=$CS @@ -192,7 +192,7 @@ if [ "$1" = "suggest" ]; then fi STATSTYPE=$(echo "${0##*/}" | cut -d _ -f 1-5) -if [ "$STATSTYPE" = "emc_vnx_file_nfs_stats" ]; then STATSTYPE=NFS; +if [ "$STATSTYPE" = "emc_vnx_file_nfs_stats" ]; then STATSTYPE=NFS; elif [ "$STATSTYPE" = "emc_vnx_file_basicdm_stats" ]; then STATSTYPE=BASICDM; else echo "Do not know what to do. 
Name the plugin as 'emc_vnx_file_nfs_stats_' or 'emc_vnx_file_basicdm_stats_'" >&2; exit 1; fi @@ -213,9 +213,9 @@ if [ "$1" = "config" ] ; then run_remote nas_server -i "$server" | grep -q 'type *= nas' || continue nas_server_ok=TRUE filtered_server="$(clean_fieldname "$server")" - + if [ "$STATSTYPE" = "BASICDM" ] ; then - cat <<-EOF + cat <<-EOF multigraph emc_vnx_cpu_percent graph_title EMC VNX 5300 Datamover CPU Util % graph_vlabel % @@ -259,7 +259,7 @@ if [ "$1" = "config" ] ; then ${server}_total.label ${server} Total ${server}_freebuffer.label ${server} Free Buffer ${server}_encumbered.label ${server} Encumbered - + multigraph emc_vnx_filecache graph_title EMC VNX 5300 File Buffer Cache graph_vlabel per second @@ -272,7 +272,7 @@ if [ "$1" = "config" ] ; then ${server}_w_hits.label Watermark Hits ${server}_hits.label Hits ${server}_lookups.label Lookups - + multigraph emc_vnx_fileresolve graph_title EMC VNX 5300 FileResolve graph_vlabel Entries @@ -286,8 +286,8 @@ if [ "$1" = "config" ] ; then if [ "$STATSTYPE" = "NFS" ] ; then #nfs.v3.op data # [nasadmin@mnemonic0 ~]$ server_stats server_2 -info nfs.v3.op -# server_2 : -# +# server_2 : +# # name = nfs.v3.op # description = NFS V3 per operation statistics # type = Set @@ -296,7 +296,7 @@ if [ "$1" = "config" ] ; then # member_of = nfs.v3 member_elements_by_line=$(run_remote server_stats "$server" -info nfs.v3.op | grep member_elements | sed -ne 's/^.*= //p') IFS=',' read -ra graphs <<< "$member_elements_by_line" - cat <<-EOF + cat <<-EOF multigraph vnx_emc_v3_calls_s graph_title EMC VNX 5300 NFSv3 Calls per second graph_vlabel Calls @@ -309,7 +309,7 @@ if [ "$1" = "config" ] ; then done cat <<-EOF - + multigraph vnx_emc_v3_usec_call graph_title EMC VNX 5300 NFSv3 uSeconds per call graph_vlabel uSec / call @@ -362,7 +362,7 @@ if [ "$1" = "config" ] ; then echo "${server}_$field.label $server $field" done cat <<-EOF - + multigraph vnx_emc_v4_op_percent graph_title EMC VNX 5300 NFSv4 Op % graph_vlabel % @@ -376,7 +376,7 @@ if [ "$1" = "config" ] ; then done #nfs.client data -# Total Read Write Suspicious Total Read Write Avg +# Total Read Write Suspicious Total Read Write Avg # Ops/s Ops/s Ops/s Ops diff KiB/s KiB/s KiB/s uSec/call member_elements_by_line=$(run_remote server_stats server_2 -monitor nfs.client -count 1 -terminationsummary no -titles never | sed -ne 's/^.*id=//p' | cut -d' ' -f1) #Somewhy readarray adds extra \n in the end of each variable. 
So, we use read() with a workaround @@ -437,8 +437,8 @@ if [ "$1" = "config" ] ; then done #nfs-std -# Timestamp NFS Read Read Read Size Write Write Write Size Active -# Ops/s Ops/s KiB/s Bytes Ops/s KiB/s Bytes Threads +# Timestamp NFS Read Read Read Size Write Write Write Size Active +# Ops/s Ops/s KiB/s Bytes Ops/s KiB/s Bytes Threads cat <<-EOF multigraph vnx_emc_nfs_std_nfs_ops @@ -451,7 +451,7 @@ if [ "$1" = "config" ] ; then echo "${filtered_server}_wops.label $server Write Ops/s" echo "${filtered_server}_wops.draw STACK" echo "${filtered_server}_tops.label $server Total Ops/s" - + cat <<-EOF multigraph vnx_emc_nfs_std_nfs_b_s @@ -465,7 +465,7 @@ if [ "$1" = "config" ] ; then echo "${filtered_server}_wbs.draw STACK" echo "${filtered_server}_tbs.label $server Total B/s" echo "${filtered_server}_tbs.cdef ${filtered_server}_rbs,${filtered_server}_wbs,+" - + cat <<-EOF multigraph vnx_emc_nfs_std_nfs_avg @@ -499,10 +499,10 @@ for server in $nas_servers; do if [ "$STATSTYPE" = "BASICDM" ] ; then #basicdm data -# [nasadmin@mnemonic0 ~]$ server_stats server_2 -count 1 -terminationsummary no -# server_2 CPU Network Network dVol dVol -# Timestamp Util In Out Read Write -# % KiB/s KiB/s KiB/s KiB/s +# [nasadmin@mnemonic0 ~]$ server_stats server_2 -count 1 -terminationsummary no +# server_2 CPU Network Network dVol dVol +# Timestamp Util In Out Read Write +# % KiB/s KiB/s KiB/s KiB/s # 20:42:26 9 16432 3404 1967 24889 member_elements_by_line=$(run_remote server_stats "$server" -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]') @@ -519,10 +519,10 @@ for server in $nas_servers; do echo "${server}_stor_read.value $((graphs[4] * 1024))" echo "${server}_stor_write.value $((graphs[5] * 1024))" -# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor kernel.memory -count 1 -terminationsummary no -# server_2 Free Buffer Buffer Buffer Buffer Buffer Buffer Cache Encumbered FileResolve FileResolve FileResolve Free KiB Page Total Used KiB Memory -# Timestamp Buffer Cache High Cache Cache Cache Cache Low Watermark Memory Dropped Max Used Size Memory Util -# KiB Watermark Hits/s Hit % Hits/s Lookups/s Watermark Hits/s Hits/s KiB Entries Limit Entries KiB KiB % +# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor kernel.memory -count 1 -terminationsummary no +# server_2 Free Buffer Buffer Buffer Buffer Buffer Buffer Cache Encumbered FileResolve FileResolve FileResolve Free KiB Page Total Used KiB Memory +# Timestamp Buffer Cache High Cache Cache Cache Cache Low Watermark Memory Dropped Max Used Size Memory Util +# KiB Watermark Hits/s Hit % Hits/s Lookups/s Watermark Hits/s Hits/s KiB Entries Limit Entries KiB KiB % # 20:44:14 3522944 0 96 11562 12010 0 0 3579268 0 0 0 3525848 8 6291456 2765608 44 member_elements_by_line=$(run_remote server_stats "$server" -monitor kernel.memory -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]') @@ -530,7 +530,7 @@ for server in $nas_servers; do echo -e "\nmultigraph emc_vnx_memory" #Reserved for math - echo "${server}_total.value $((graphs[14] / 1))" + echo "${server}_total.value $((graphs[14] / 1))" echo "${server}_used.value $((graphs[15] / 1))" echo "${server}_free.value $((graphs[12] / 1))" echo "${server}_freebuffer.value $((graphs[1] / 1))" @@ -553,9 +553,9 @@ for server in $nas_servers; do if [ "$STATSTYPE" = "NFS" ] ; then #nfs.v3.op data # [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.v3.op -count 1 -terminationsummary no -# server_2 NFS Op NFS NFS Op NFS NFS Op % -# Timestamp Op Errors Op -# Calls/s diff 
uSec/Call +# server_2 NFS Op NFS NFS Op NFS NFS Op % +# Timestamp Op Errors Op +# Calls/s diff uSec/Call # 22:14:41 v3GetAttr 30 0 23 21 # v3Lookup 40 0 98070 27 # v3Access 50 0 20 34 @@ -571,7 +571,7 @@ for server in $nas_servers; do while IFS=$'\n' read -ra graphs ; do elements_array+=( $graphs ) done <<< "$member_elements_by_line" - + if [ "${#elements_array[@]}" -eq "0" ]; then LINES=0; fi echo "multigraph vnx_emc_v3_calls_s" @@ -593,9 +593,9 @@ for server in $nas_servers; do #nfs.v4.op data # [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.v4.op -count 1 -terminationsummary no -# server_2 NFS Op NFS NFS Op NFS NFS Op % -# Timestamp Op Errors Op -# Calls/s diff uSec/Call +# server_2 NFS Op NFS NFS Op NFS NFS Op % +# Timestamp Op Errors Op +# Calls/s diff uSec/Call # 22:13:14 v4Compound 2315 0 7913 30 # v4Access 246 0 5 3 # v4Close 133 0 11 2 @@ -643,9 +643,9 @@ for server in $nas_servers; do elements_array=() #nfs.client data -# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.client -count 1 -terminationsummary no -# server_2 Client NFS NFS NFS NFS NFS NFS NFS NFS -# Timestamp Total Read Write Suspicious Total Read Write Avg +# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.client -count 1 -terminationsummary no +# server_2 Client NFS NFS NFS NFS NFS NFS NFS NFS +# Timestamp Total Read Write Suspicious Total Read Write Avg # Ops/s Ops/s Ops/s Ops diff KiB/s KiB/s KiB/s uSec/call # 20:26:38 id=192.168.1.223 2550 20 2196 13 4673 159 4514 1964 # id=192.168.1.2 691 4 5 1 1113 425 688 2404 @@ -687,9 +687,9 @@ for server in $nas_servers; do #nfs-std # bash-3.2$ server_stats server_2 -monitor nfs-std -# server_2 Total NFS NFS NFS Avg NFS NFS NFS Avg NFS -# Timestamp NFS Read Read Read Size Write Write Write Size Active -# Ops/s Ops/s KiB/s Bytes Ops/s KiB/s Bytes Threads +# server_2 Total NFS NFS NFS Avg NFS NFS NFS Avg NFS +# Timestamp NFS Read Read Read Size Write Write Write Size Active +# Ops/s Ops/s KiB/s Bytes Ops/s KiB/s Bytes Threads # 18:14:52 688 105 6396 62652 1 137 174763 3 member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs-std -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]') IFS=$' ' read -ra graphs <<< "$member_elements_by_line" @@ -700,12 +700,12 @@ for server in $nas_servers; do echo "${filtered_server}_rops.value ${graphs[2]}" echo "${filtered_server}_wops.value ${graphs[5]}" echo "${filtered_server}_tops.value ${graphs[1]}" - + echo -e "\nmultigraph vnx_emc_nfs_std_nfs_b_s" echo "${filtered_server}_rbs.value $((graphs[3] * 1024))" echo "${filtered_server}_wbs.value $((graphs[6] * 1024))" echo "${filtered_server}_tbs.value 0" - + echo -e "\nmultigraph vnx_emc_nfs_std_nfs_avg" echo "${filtered_server}_avg_readsize.value ${graphs[4]}" diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-1.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-1.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-2.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-2.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-3.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-3.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-4.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-4.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-5.png 
b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-5.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-6.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-6.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-7.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-7.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-8.png b/plugins/emc/example-graphs/emc_vnx_block_lun_perfdata-8.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-1.png b/plugins/emc/example-graphs/emc_vnx_file_-1.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-2.png b/plugins/emc/example-graphs/emc_vnx_file_-2.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-3.png b/plugins/emc/example-graphs/emc_vnx_file_-3.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-4.png b/plugins/emc/example-graphs/emc_vnx_file_-4.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-5.png b/plugins/emc/example-graphs/emc_vnx_file_-5.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-6.png b/plugins/emc/example-graphs/emc_vnx_file_-6.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-7.png b/plugins/emc/example-graphs/emc_vnx_file_-7.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-8.png b/plugins/emc/example-graphs/emc_vnx_file_-8.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-9.png b/plugins/emc/example-graphs/emc_vnx_file_-9.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-a.png b/plugins/emc/example-graphs/emc_vnx_file_-a.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-b.png b/plugins/emc/example-graphs/emc_vnx_file_-b.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-c.png b/plugins/emc/example-graphs/emc_vnx_file_-c.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-g.png b/plugins/emc/example-graphs/emc_vnx_file_-g.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-h.png b/plugins/emc/example-graphs/emc_vnx_file_-h.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-i.png b/plugins/emc/example-graphs/emc_vnx_file_-i.png old mode 100755 new mode 100644 diff --git a/plugins/emc/example-graphs/emc_vnx_file_-j.png b/plugins/emc/example-graphs/emc_vnx_file_-j.png old mode 100755 new mode 100644 diff --git a/plugins/fan/ibmfan b/plugins/fan/ibmfan index 95d147a2..cdc338e0 100755 --- a/plugins/fan/ibmfan +++ b/plugins/fan/ibmfan @@ -1,10 +1,10 @@ #!/bin/sh -# +# # Plugin to monitor fan speed on an IBM/Lenovo Laptop # # This plugin reads the current speed of the system fan from # the /proc file system. As it queries specific files provided -# by kernel modules for IBM/Lenovo Laptops, it probably only +# by kernel modules for IBM/Lenovo Laptops, it probably only # works for those, but it should be easy to adapt to others # if similar information is available for other types of laptops. 
# @@ -19,14 +19,13 @@ if [ "$1" = "autoconf" ]; then if [ -r /proc/acpi/ibm/fan ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi - + if [ "$1" = "config" ]; then echo 'graph_title Fan speed' echo 'graph_args --base 1000 -l 0' diff --git a/plugins/fax/faxstat b/plugins/fax/faxstat index 75f99366..61962dec 100755 --- a/plugins/fax/faxstat +++ b/plugins/fax/faxstat @@ -20,11 +20,10 @@ FAXQ=${faxq:=/usr/sbin/faxqclean} if [ "$1" = "autoconf" ]; then if [ -x ${FAXSTAT} ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi JOBTYPES="Running Waiting Done" diff --git a/plugins/firebird/firebird b/plugins/firebird/firebird index 1e70bd79..7eabb3fe 100755 --- a/plugins/firebird/firebird +++ b/plugins/firebird/firebird @@ -7,7 +7,7 @@ # # "employee" must be an alias configured in the firebird aliases.conf file # -# You will also need to set +# You will also need to set # # [firebird_employee] # user root @@ -53,24 +53,24 @@ gstat=$(which gstat 2> /dev/null) ${gstat:=/opt/firebird/bin/gstat} -h ${db} | awk -F'[\t]+' \ '{ sub(/^ */,""); - if ($2 == "Oldest transaction") - { + if ($2 == "Oldest transaction") + { oldest_trans=$3; print "oldest_transaction.value " $3 } if ($2 == "Oldest active") { - oldest_active=$3 + oldest_active=$3 print "oldest_active.value " $3 } - if ($2 == "Oldest snapshot") + if ($2 == "Oldest snapshot") { - oldest_snapshot=$3 + oldest_snapshot=$3 print "oldest_snapshot.value " $3 } if ($2 == "Next transaction") { - next_transaction=$3 + next_transaction=$3 print "next_transaction.value " $3 print "oldest_trans_gap1.value " $3 - oldest_trans print "oldest_trans_gap2.value " $3 - oldest_snapshot diff --git a/plugins/fr24/fr24 b/plugins/fr24/fr24 index ba7f29dc..67924623 100755 --- a/plugins/fr24/fr24 +++ b/plugins/fr24/fr24 @@ -39,7 +39,7 @@ case $1 in config) echo "graph_title Number of planes in sight graph_info Number of planes in sight with DVB-T receiver. -graph_category other +graph_category tv graph_vlabel Number of planes planes.label planes" exit 0;; diff --git a/plugins/freeradius/freeradius b/plugins/freeradius/freeradius index ae5e3016..b0c97422 100755 --- a/plugins/freeradius/freeradius +++ b/plugins/freeradius/freeradius @@ -42,7 +42,7 @@ fi if [ "$1" = "config" ]; then echo 'graph_title freeradius requests' echo 'graph_args --base 1000 -l 0 ' - if [ -n ${graph_period} ]; then + if [ -n ${graph_period} ]; then echo 'graph_period '${graph_period} fi echo 'graph_vlabel requests / ${graph_period}' diff --git a/plugins/freeradius/freeradius_queue b/plugins/freeradius/freeradius_queue old mode 100644 new mode 100755 diff --git a/plugins/freeradius/freeradius_sqlippools_ b/plugins/freeradius/freeradius_sqlippools_ new file mode 100755 index 00000000..bfaca5b7 --- /dev/null +++ b/plugins/freeradius/freeradius_sqlippools_ @@ -0,0 +1,203 @@ +#!/usr/bin/perl -w +# -*- perl -*- + +use strict; + +=head1 NAME + +freeradius_sqlippools_ - Plugin to monitor IP pool utilisation + +=head1 CONFIGURATION + +This is a wildcard plugin to support fetching the status of multiple sqlippool +instances. + +It can also be linked directly (as with a non-wildcard plugin) to present a +combined graph showing the percentage utilisation of all pools. 
+ +It is likely that a common configuration will apply to all plugins but this +doesn't have to be so: + + [freeradius_sqlippools_*] + env.fr_driver mysql + env.fr_host 192.0.2.1 + env.fr_port 3306 + env.fr_db radius + env.fr_user radius + env.fr_pass radpass + +fr_driver is the name of the Perl DBI driver used in the DSN connection string. +The corresponding DBD module for the driver must be installed. + +You should omit fr_pass and specify fr_passfile to avoid placing the password +in a plugin configuration file that is world accessible, e.g.: + + [freeradius_sqlippools_mypool] + user radmonitor + group radmonitor + env.fr_driver Pg + env.fr_host 192.0.2.2 + env.fr_port 5432 + env.fr_db radius + env.fr_user radmonitor + env.fr_passfile /home/radmonitor/db_pass.txt + +=head1 AUTHORS + +Original Author: Network RADIUS + +=head1 LICENSE + +GPLv2 + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=cut + + +use File::Basename; +use Storable qw(lock_store lock_retrieve); +use DBI; + +use constant STATEFILE => "$ENV{MUNIN_PLUGSTATE}/freeradius_sqlippools.state"; + +use constant SQL => <<'EOF'; +SELECT + DISTINCT pool_name AS pool_name, + COUNT(id) OVER (PARTITION BY pool_name) AS total, + SUM(CASE WHEN expiry_time > NOW() THEN 1 ELSE 0 END) OVER (PARTITION BY pool_name) AS used +FROM radippool +EOF + +my $script = basename($0); +(my $instance) = $script =~ /freeradius_sqlippools_(.+)/; +my $command = $ARGV[0] || 'show'; + +autoconf() if $command eq 'autoconf'; +suggest() if $command eq 'suggest'; +config_instance($instance) if $command eq 'config' && defined $instance; +config_combined() if $command eq 'config' && !defined $instance; +show_instance($instance) if $command eq 'show' && defined $instance; +show_combined() if $command eq 'show' && !defined $instance; + +exit; + + +sub autoconf { + my $results; + eval { + $results = get_pools(1); + }; + if ($results) { + print "yes\n"; + } else { + print "no (Failed to read pool status from database)\n"; + } +} + +sub suggest { + my $pools = get_pools(1); + return unless defined $pools; + print "$_\n" foreach keys %{$pools}; +} + +sub config_instance { + my $instance = shift; + + print <{$instance}; + print "total.value $pool->{total}\n"; + print "used.value $pool->{used}\n"; +} + +sub show_combined { + my $pools = get_pools(); + foreach (keys %{$pools}) { + my $util = $pools->{$_}->{used} * 100 / $pools->{$_}->{total}; + print "$_.value $util\n"; + } +} + +sub get_pools { + + my $no_cache = shift; # Ensure that caching doesn't interfere with reconfiguration + + # Read results from the cache unless stale or told not to + if (!$no_cache && -e STATEFILE && -M STATEFILE < 60/86400) { + return lock_retrieve(STATEFILE); + } + + my $driver = $ENV{'fr_driver'} || 'mysql'; + my $host = $ENV{'fr_host'} || '127.0.0.1'; + my $port = $ENV{'fr_port'} || '3306'; + my $db = $ENV{'fr_db'} || 'radius'; + my $user = $ENV{'fr_user'} || 'radius'; + my $pass = $ENV{'fr_pass'} || 'radpass'; + my $passfile = $ENV{'fr_passfile'}; + + # Read password from a file + if (!defined $pass && defined $passfile) { + open (my $FH, '<', $passfile) || die "Failed to open fr_passfile: $passfile"; + $pass = <$FH>; + chomp $pass; + close $FH; + } + + my $dsn; + if ($driver eq 'Oracle') { + $dsn = "DBI:$driver:$db"; + } else { + $dsn = "DBI:$driver:database=$db;host=$host"; + } + $dsn .= ";port=$port" if $port; + + # Read the results by running our query against the database + my $dbh = DBI->connect($dsn, $user, $pass, { RaiseError => 1, PrintError => 0, 
AutoCommit => 1 }); + my $sth = $dbh->prepare(SQL); + $sth->execute(); + my $results=$sth->fetchall_hashref('pool_name'); + $sth->finish(); + $dbh->disconnect(); + + # Cache the results + if (!$no_cache && $results) { + lock_store($results,STATEFILE); + } + + return $results; + +} + +# vim:syntax=perl diff --git a/plugins/ftp/proftpd b/plugins/ftp/proftpd index f02c49fd..dbfcbf53 100755 --- a/plugins/ftp/proftpd +++ b/plugins/ftp/proftpd @@ -8,7 +8,7 @@ if [ "$1" = 'config' ]; then echo "graph_title Serveur FTP" echo "graph_category network" echo "graph_vlabel Stats Proftpd" - echo "succes.label Login succes" + echo "succes.label Login success" echo "succes.draw AREA" echo "failed.label Login failed" echo "failed.draw AREA" @@ -16,10 +16,10 @@ fi LOGFILE=${LOGFILE:-"/var/log/proftpd/proftpd.log"} -succes=$(grep -c "successful" "$LOGFILE" ) +success=$(grep -c "successful" "$LOGFILE") failed=$(grep -c "Login failed" "$LOGFILE" ) -echo "succes.value $succes" +echo "succes.value $success" echo "failed.value $failed" exit 0 diff --git a/plugins/ftp/proftpd_bytes b/plugins/ftp/proftpd_bytes index b34f38f7..a4086347 100755 --- a/plugins/ftp/proftpd_bytes +++ b/plugins/ftp/proftpd_bytes @@ -14,51 +14,48 @@ #%# family=auto #%# capabilities=autoconf -MAXLABEL=20 mktempfile () { -mktemp -t $1 -} + mktemp -t "$1" +} LOGFILE=${logfile:-/var/log/proftpd/xferlog} -LOGTAIL=${logtail:-`which logtail`} +LOGTAIL=${logtail:-$(which logtail)} STATEFILE=$MUNIN_PLUGSTATE/xferlog-bytes.offset if [ "$1" = "autoconf" ]; then - if [ -f "${LOGFILE}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then - echo yes - exit 0 - else - echo no - exit 1 - fi + if [ -f "$LOGFILE" ] && [ -n "$LOGTAIL" ] && [ -x "$LOGTAIL" ] ; then + echo yes + else + echo "no (missing logfile or 'logtail' executable)" + fi + exit 0 fi if [ "$1" = "config" ]; then - echo 'graph_title FTP Server Bytes' - echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel FTP Server Bytes' - echo 'graph_category network' - echo 'ftp_get.label Bytes GET' - echo 'ftp_put.label Bytes PUT' - exit 0 + echo 'graph_title FTP Server Bytes' + echo 'graph_args --base 1000 -l 0' + echo 'graph_vlabel FTP Server Bytes' + echo 'graph_category network' + echo 'ftp_get.label Bytes GET' + echo 'ftp_put.label Bytes PUT' + exit 0 fi ftp_get=U ftp_put=U -TEMP_FILE=`mktempfile munin-xferlog-bytes.XXXXXX` +TEMP_FILE=$(mktempfile munin-xferlog-bytes.XXXXXX) -if [ -n "$TEMP_FILE" -a -f "$TEMP_FILE" ] +if [ -n "$TEMP_FILE" ] && [ -f "$TEMP_FILE" ] then - $LOGTAIL ${LOGFILE} $STATEFILE | grep "[[:space:]][oi][[:space:]]" > ${TEMP_FILE} - ftp_get=`grep "[[:space:]]o[[:space:]]" ${TEMP_FILE} | awk '{s += $8} END { if ( s ) print s ; else print "0" }'` - ftp_put=`grep "[[:space:]]i[[:space:]]" ${TEMP_FILE} | awk '{s += $8} END { if ( s ) print s ; else print "0" }'` + "$LOGTAIL" "$LOGFILE" "$STATEFILE" | grep "[[:space:]][oi][[:space:]]" >"$TEMP_FILE" + ftp_get=$(grep "[[:space:]]o[[:space:]]" "$TEMP_FILE" | awk '{s += $8} END { if ( s ) print s ; else print "0" }') + ftp_put=$(grep "[[:space:]]i[[:space:]]" "$TEMP_FILE" | awk '{s += $8} END { if ( s ) print s ; else print "0" }') - /bin/rm -f $TEMP_FILE + /bin/rm -f "$TEMP_FILE" fi -echo "ftp_get.value ${ftp_get}" -echo "ftp_put.value ${ftp_put}" - +echo "ftp_get.value $ftp_get" +echo "ftp_put.value $ftp_put" diff --git a/plugins/ftp/proftpd_count b/plugins/ftp/proftpd_count index 13f50eec..f48e06a3 100755 --- a/plugins/ftp/proftpd_count +++ b/plugins/ftp/proftpd_count @@ -14,51 +14,47 @@ #%# family=auto #%# capabilities=autoconf 
-MAXLABEL=20 mktempfile () { -mktemp -t $1 -} + mktemp -t "$1" +} LOGFILE=${logfile:-/var/log/proftpd/xferlog} -LOGTAIL=${logtail:-`which logtail`} +LOGTAIL=${logtail:-$(which logtail)} STATEFILE=$MUNIN_PLUGSTATE/xferlog-count.offset if [ "$1" = "autoconf" ]; then - if [ -f "${LOGFILE}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then - echo yes - exit 0 - else - echo no - exit 1 - fi + if [ -f "$LOGFILE" ] && [ -n "$LOGTAIL" ] && [ -x "$LOGTAIL" ] ; then + echo yes + else + echo "no (missing logfile or 'logtail' executable)" + fi + exit 0 fi if [ "$1" = "config" ]; then - echo 'graph_title FTP Server Transfers' - echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel FTP Server Transfers' - echo 'graph_category network' - echo 'ftp_get.label Files GET' - echo 'ftp_put.label Files PUT' - exit 0 + echo 'graph_title FTP Server Transfers' + echo 'graph_args --base 1000 -l 0' + echo 'graph_vlabel FTP Server Transfers' + echo 'graph_category network' + echo 'ftp_get.label Files GET' + echo 'ftp_put.label Files PUT' + exit 0 fi ftp_get=U ftp_put=U -TEMP_FILE=`mktempfile munin-xferlog-count.XXXXXX` +TEMP_FILE=$(mktempfile munin-xferlog-count.XXXXXX) -if [ -n "$TEMP_FILE" -a -f "$TEMP_FILE" ] -then - $LOGTAIL ${LOGFILE} $STATEFILE | grep "[[:space:]][oi][[:space:]]" > ${TEMP_FILE} - ftp_get=`grep "[[:space:]]o[[:space:]]" ${TEMP_FILE} | wc -l` - ftp_put=`grep "[[:space:]]i[[:space:]]" ${TEMP_FILE} | wc -l` +if [ -n "$TEMP_FILE" ] && [ -f "$TEMP_FILE" ]; then + "$LOGTAIL" "$LOGFILE" "$STATEFILE" | grep "[[:space:]][oi][[:space:]]" >"$TEMP_FILE" + ftp_get=$(grep "[[:space:]]o[[:space:]]" "$TEMP_FILE" | wc -l) + ftp_put=$(grep "[[:space:]]i[[:space:]]" "$TEMP_FILE" | wc -l) - /bin/rm -f $TEMP_FILE + /bin/rm -f "$TEMP_FILE" fi -echo "ftp_get.value ${ftp_get}" -echo "ftp_put.value ${ftp_put}" - +echo "ftp_get.value $ftp_get" +echo "ftp_put.value $ftp_put" diff --git a/plugins/ftp/pure-ftpd b/plugins/ftp/pure-ftpd index 2a699778..b77ed934 100755 --- a/plugins/ftp/pure-ftpd +++ b/plugins/ftp/pure-ftpd @@ -20,7 +20,7 @@ # # Configuration: # Maybe need to add following lines to plugins config file -# (e.g. /etc/munin/plugin-conf.d/pure-ftpd) to run pure-ftpwho +# (e.g. /etc/munin/plugin-conf.d/pure-ftpd) to run pure-ftpwho # as user with apropirate privilegs then restart munin-node. # # [pure-ftpd] diff --git a/plugins/ftp/pure-ftpd-bw b/plugins/ftp/pure-ftpd-bw index aa9112d2..6d707894 100755 --- a/plugins/ftp/pure-ftpd-bw +++ b/plugins/ftp/pure-ftpd-bw @@ -11,7 +11,7 @@ # # Configuration: # Maybe need to add following lines to plugins config file -# (e.g. /etc/munin/plugin-conf.d/pure-ftpd) to run pure-ftpwho +# (e.g. /etc/munin/plugin-conf.d/pure-ftpd) to run pure-ftpwho # as user with apropirate privilegs then restart munin-node. 
# # [pure-ftpd-bw] @@ -42,18 +42,18 @@ if [ "$1" = "autoconf" ]; then exit 0 else echo "no (logtail not found)" - exit 1 + exit 0 fi else echo "no (logfile $LOGFILE does not exist)" - exit 1 + exit 0 fi fi if [ "$1" = "config" ]; then echo 'graph_title Pure Ftpd Bandwidth' echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel Datas sent / received' + echo 'graph_vlabel Data sent / received' echo 'graph_category network' echo 'dl.label Bytes downloaded' echo 'ul.label Bytes uploaded' @@ -73,7 +73,7 @@ if [ -f $TMP1 ]; then echo "ul.value ${ul}" rm $TMP1 else - echo "cant write temp file" + echo "can't write temp file" exit 1 fi diff --git a/plugins/ftp/pure-ftpd-logs b/plugins/ftp/pure-ftpd-logs index 2a0add13..1ebc8668 100755 --- a/plugins/ftp/pure-ftpd-logs +++ b/plugins/ftp/pure-ftpd-logs @@ -32,11 +32,11 @@ if [ "$1" = "autoconf" ]; then exit 0 else echo "no (logtail not found)" - exit 1 + exit 0 fi else echo "no (logfile ${LOGFILE} does not exist)" - exit 1 + exit 0 fi fi diff --git a/plugins/ftp/pureftpd_traffic b/plugins/ftp/pureftpd_traffic index c9ec4c73..d96898e5 100755 --- a/plugins/ftp/pureftpd_traffic +++ b/plugins/ftp/pureftpd_traffic @@ -54,7 +54,7 @@ if (!(-e $logfile)) { open LOG, "<$logfile" or print "Can't open logfile!\n"; my $last_line; while() { - $last_line = $_ if eof; + $last_line = $_ if eof; } close LOG; @em = split(/ /,$last_line); @@ -77,33 +77,33 @@ if (!(-e $logfile)) { $put=0, $mti=0, $dsm=0; - + open LOG, "<$logfile"; @log=; foreach $row (@log) { @parts=split(/ /,$row); $curr_ts = "$parts[3] $parts[4]"; $curr_ts =~ s/\[//g; - $curr_ts =~ s/\]//g; + $curr_ts =~ s/\]//g; if ( Date_Cmp($curr_ts,$last_ts) > 0 ) { if ( $parts[5]=~ /GET/ ) { $get+=int($parts[8]); if ($parts[2] eq "mti") { $mti+=int($parts[8]); - } + } if ($parts[2] eq "dsm") { $dsm+=int($parts[8]); - } + } } if ( $parts[5]=~ /PUT/ ) { $put-=int($parts[8]); if ($parts[2] eq "mti") { $mti-=int($parts[8]); - } + } if ($parts[2] eq "dsm") { $dsm-=int($parts[8]); - } + } } } } diff --git a/plugins/ftp/vsftpd b/plugins/ftp/vsftpd index d329552f..583d3d0f 100755 --- a/plugins/ftp/vsftpd +++ b/plugins/ftp/vsftpd @@ -8,7 +8,7 @@ if [ "$1" = "autoconf" ]; then exit 0 else echo no - exit 1 + exit 0 fi fi diff --git a/plugins/ftp/vsftpd-rel b/plugins/ftp/vsftpd-rel index 25ee99de..27b4dc96 100755 --- a/plugins/ftp/vsftpd-rel +++ b/plugins/ftp/vsftpd-rel @@ -56,7 +56,7 @@ then exit 0 else echo no - exit 1 + exit 0 fi fi diff --git a/plugins/funkytown/denon_x311_volume b/plugins/funkytown/denon_x311_volume index 4dfd2e98..78fdc3e7 100755 --- a/plugins/funkytown/denon_x311_volume +++ b/plugins/funkytown/denon_x311_volume @@ -1,17 +1,17 @@ #!/usr/bin/gawk -f # Denon x311 volume-plugin for munin # Copyright (C) 2010 Kristian Lyngstol -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
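A recurring change in the hunks above (faxstat, ibmfan, pure-ftpd-bw, pure-ftpd-logs, vsftpd, vsftpd-rel, and the proftpd log plugins) is replacing "exit 1" with "exit 0" after an autoconf answer of "no". The sketch below is illustrative only and not part of the patch; the /proc path is a stand-in for whatever check a given plugin actually needs. It shows the convention these hunks converge on: "autoconf" prints "yes" or "no (reason)" on stdout and exits 0 either way, reserving a non-zero exit for a plugin that is itself broken.

#!/bin/sh
# Minimal autoconf sketch, assuming a readability check on a placeholder path.
if [ "$1" = "autoconf" ]; then
    if [ -r /proc/acpi/ibm/fan ]; then
        echo yes
    else
        # The reason in parentheses is shown by munin-node-configure.
        echo "no (/proc/acpi/ibm/fan not readable)"
    fi
    # Exit 0 whether the answer is yes or no; a non-zero status signals a broken plugin, not a negative answer.
    exit 0
fi

This matches what munin-node-configure expects: a clean "no" is a valid answer, while a non-zero exit is treated as a plugin error.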
diff --git a/plugins/games/game b/plugins/games/game old mode 100644 new mode 100755 index 9cb182ad..f248b5bd --- a/plugins/games/game +++ b/plugins/games/game @@ -45,7 +45,7 @@ function printMultigraph($ini_array, $machine_name, $title, $info, $max) { p("graph_category games"); p("graph_info $info"); p("graph_printf %6.0lf"); - + if (isset($ini_array['settings'][$machine_name . '_colour'])) p("players.colour " . $ini_array['settings'][$machine_name . '_colour']); @@ -106,8 +106,8 @@ if (isset($_SERVER['argv'][1])) { // Query the game servers $results = queryServers($ini_array); // Cache the query in the state file - $fp = fopen($state, 'w+') or die("I could not open state file."); - fwrite($fp, serialize($results)); + $fp = fopen($state, 'w+') or die("I could not open state file."); + fwrite($fp, serialize($results)); fclose($fp); @@ -175,7 +175,7 @@ $ini_array = parse_ini_file($config, true); // Load games.ini so we can show pretty game names $games = parse_ini_file('gameq/GameQ/games.ini', true); -$results = unserialize(file_get_contents($state)); +$results = unserialize(file_get_contents($state)); // Print individual game values foreach ($results as $name => $server){ diff --git a/plugins/gearman/gearman_queue b/plugins/gearman/gearman_queue index d24120e1..3b8c9485 100755 --- a/plugins/gearman/gearman_queue +++ b/plugins/gearman/gearman_queue @@ -27,7 +27,7 @@ if (defined $ARGV[0] and $ARGV[0] eq 'config') { print "graph_title Gearman queue\n"; print "graph_args --base 1000\n"; print "graph_vlabel jobs\n"; - print "graph_category other\n"; + print "graph_category cloud\n"; while ( my ($key, $value) = each(%$gearman) ) { my $param = $key."_"."queue"; print "$param.type GAUGE\n"; diff --git a/plugins/gearman/gearman_workers b/plugins/gearman/gearman_workers index 2fe602c6..916ae4aa 100755 --- a/plugins/gearman/gearman_workers +++ b/plugins/gearman/gearman_workers @@ -35,7 +35,7 @@ if (defined $ARGV[0] and $ARGV[0] eq 'config') { print "graph_title Gearman workers\n"; print "graph_args --base 1000\n"; print "graph_vlabel workers\n"; - print "graph_category other\n"; + print "graph_category cloud\n"; foreach my $key (@sorted) { my $param = $key."_"."total"; print "$param.type GAUGE\n"; diff --git a/plugins/geowebcache/geowebcache-bandwidth b/plugins/geowebcache/geowebcache-bandwidth index c2a72e62..f5c90cf1 100755 --- a/plugins/geowebcache/geowebcache-bandwidth +++ b/plugins/geowebcache/geowebcache-bandwidth @@ -1,10 +1,10 @@ #!/usr/bin/perl # -*- perl -*- -# Author Rodolphe Quideville +# Author Rodolphe Quiédeville # Licence : GPLv2 -# Code based on tomcat_volume plugin by -# Rune Nordbe Skillingstad +# Code based on tomcat_volume plugin by +# Rune Nordbøe Skillingstad =head1 NAME @@ -41,7 +41,7 @@ HTTP port numbers =head1 AUTHOR -Rodolphe Quideville +Rodolphe Quiédeville =head1 USAGE @@ -98,7 +98,7 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { print "graph_title GeoWebCache bandwidth\n"; print "graph_args --base 1024\n"; print "graph_vlabel bit/s\n"; - print "graph_category other\n"; + print "graph_category loadbalancer\n"; print "graph_info Bandwidth graph is an average on the last 60 seconds\n"; print "bandw.draw LINE1\n"; print "bandw.label bandwidth bit/s\n"; @@ -111,11 +111,11 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { my $ua = LWP::UserAgent->new(timeout => $TIMEOUT); my $response = $ua->request(HTTP::Request->new('GET',$url)); -if ($response->content =~ '60 seconds\d+\d+.\d+ /s\d+(\d+\.\d+) ([km]?)bps') { +if ($response->content =~ '60 seconds\d+\d+.\d+ /s\d+(\d+\.\d+) 
([km]?)bps') { my $value = $1; $value = $value * 1024 if ($2 eq 'k'); $value = $value * 1024 * 1024 if ($2 eq 'm'); - print "bandw.value " . $value; + print "bandw.value " . $value . "\n"; } else { print "bandw.value U\n"; } diff --git a/plugins/geowebcache/geowebcache-blankitems b/plugins/geowebcache/geowebcache-blankitems index 74cd6d34..be05230b 100755 --- a/plugins/geowebcache/geowebcache-blankitems +++ b/plugins/geowebcache/geowebcache-blankitems @@ -1,10 +1,10 @@ #!/usr/bin/perl # -*- perl -*- -# Author Rodolphe Quideville +# Author Rodolphe Quiédeville # Licence : GPLv2 -# Code based on tomcat_volume plugin by -# Rune Nordbe Skillingstad +# Code based on tomcat_volume plugin by +# Rune Nordbøe Skillingstad =head1 NAME @@ -41,7 +41,7 @@ HTTP port numbers =head1 AUTHOR -Rodolphe Quideville +Rodolphe Quiédeville =head1 USAGE @@ -98,7 +98,7 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { print "graph_title GeoWebCache Percent of blank requests\n"; print "graph_args --base 1000\n"; print "graph_vlabel %\n"; - print "graph_category other\n"; + print "graph_category loadbalancer\n"; print "graph_info Blankidth graph is an average on the last 60 seconds\n"; print "blank.draw LINE1\n"; print "blank.label % of blank KML/HTML\n"; @@ -112,11 +112,11 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { my $ua = LWP::UserAgent->new(timeout => $TIMEOUT); my $response = $ua->request(HTTP::Request->new('GET',$url)); -if ($response->content =~ 'Blank/KML/HTML:(\d+\.?\d+)% of requests') { +if ($response->content =~ 'Blank/KML/HTML:(\d+\.?\d+)% of requests') { my $value = $1; $value = $value * 1024 if ($2 eq 'k'); $value = $value * 1024 * 1024 if ($2 eq 'm'); - print "blank.value " . $value; + print "blank.value " . $value . "\n"; } else { print "blank.value U\n"; } diff --git a/plugins/geowebcache/geowebcache-cache-hits-ratio b/plugins/geowebcache/geowebcache-cache-hits-ratio index 2fa07765..941b62e3 100755 --- a/plugins/geowebcache/geowebcache-cache-hits-ratio +++ b/plugins/geowebcache/geowebcache-cache-hits-ratio @@ -1,10 +1,10 @@ #!/usr/bin/perl # -*- perl -*- -# Author Rodolphe Quideville +# Author Rodolphe Quiédeville # Licence : GPLv2 -# Code based on tomcat_volume plugin by -# Rune Nordbe Skillingstad +# Code based on tomcat_volume plugin by +# Rune Nordbøe Skillingstad =head1 NAME @@ -41,7 +41,7 @@ HTTP port numbers =head1 AUTHOR -Rodolphe Quideville +Rodolphe Quiédeville =head1 USAGE @@ -98,7 +98,7 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { print "graph_title GeoWebCache cache hit ratio\n"; print "graph_args --base 1000\n"; print "graph_vlabel %\n"; - print "graph_category other\n"; + print "graph_category loadbalancer\n"; print "ratio.label percent\n"; print "ratio.type GAUGE\n"; print "ratio.max 100\n"; @@ -111,8 +111,8 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") { my $ua = LWP::UserAgent->new(timeout => $TIMEOUT); my $response = $ua->request(HTTP::Request->new('GET',$url)); -if ($response->content =~ ' Cache hit ratio:(\d\d?\.?\d+)% of requests' ) { - print "ratio.value " . $1; +if ($response->content =~ 'Cache hit ratio:(\d+\.\d+)% of requests' ) { + print "ratio.value " . $1 . 
"\n"; } else { print "ratio.value U\n"; } diff --git a/plugins/git/example-graphs/git_commit_behind-day.png b/plugins/git/example-graphs/git_commit_behind-day.png new file mode 100644 index 00000000..9f25de12 Binary files /dev/null and b/plugins/git/example-graphs/git_commit_behind-day.png differ diff --git a/plugins/git/example-graphs/git_commit_behind-week.png b/plugins/git/example-graphs/git_commit_behind-week.png new file mode 100644 index 00000000..0783c3e2 Binary files /dev/null and b/plugins/git/example-graphs/git_commit_behind-week.png differ diff --git a/plugins/git/git_commit_behind b/plugins/git/git_commit_behind new file mode 100755 index 00000000..bcd07319 --- /dev/null +++ b/plugins/git/git_commit_behind @@ -0,0 +1,289 @@ +#! /usr/bin/env python3 + +"""=cut +=head1 NAME + +git_commit_behind - Munin plugin to monitor local git repositories and report +how many commits behind their remote they are + +=head1 NOTES + +This plugin is similar to how apt_all works for apt packages. + +To be able to check how behind a git repository is, we need to run git fetch. +To avoid fetching all repos every 5 minutes (for each munin period) and thus +slowing down the data collection, the git fetch operation is only randomly +triggered (based on env.update.probability). +In case of very time-consuming update operations, you can run them in a +separate cron job. + +=head1 REQUIREMENTS + + - Python3 + - Git + +=head1 INSTALLATION + +Link this plugin, as usual. +For example : + ln -s /path/to/git_commit_behind /etc/munin/plugins/git_commit_behind + +If you wish to update the repositories via cron and not during the plugin +execution (cf CONFIGURATION section), you need a dedicated cron job. + +For example, you can use the following cron : + +# If the git_commit_behind plugin is enabled, fetch git repositories randomly +# according to the plugin configuration. +# By default : once an hour (12 invocations an hour, 1 in 12 chance that the +# update will happen), but ensure that there will never be more than two hours +# (7200 seconds) interval between updates. +*/5 * * * * root if [ -x /etc/munin/plugins/git_commit_behind ]; then /usr/sbin/munin-run git_commit_behind update >/dev/null; fi + +=head1 CONFIGURATION + +Use your "/etc/munin/plugin-conf.d/munin-node" to configure this plugin. + [git_commit_behind] + user [user] + env.git_path /path/to/git + env.update.mode [munin|cron] + env.update.probability 12 + env.update.maxinterval 7200 + +user [user] : required, the owner of the repository checkouts + in case of multiple different owners, use root +env.git_path : optional (default : /usr/bin/git), the path to the git binary. +env.update.mode : optional (default : munin), the update mode. + munin : repositories are git fetched during the pugin execution + cron : a dedicated cron job needs to be used to update the repositories +env.update.probability : optional (default : 12), + runs the update randomly (1 in chances) +env.update.maxinterval : optional (default : 7200), + ensures that the update is run at least every seconds + + +Then, for each repository you want to check, you need the following +configuration block under the git_commit_behind section + env.repo.[repoCode].path /path/to/local/repo + env.repo.[repoCode].name Repo Name + env.repo.[repoCode].user user + env.repo.[repoCode].warning 10 + env.repo.[repoCode].critical 100 + +[repoCode] can only contain letters, numbers and underscores. 
+ +path : mandatory, the local path to your git repository +name : optional (default : [repoCode]), a cleaner name that will be displayed +user : optional (default : empty), the owner of the repository + if set and different from the user running the plugin, the git commands + will be executed as this user +warning : optional (default 10), the warning threshold +critical : optional (default 100), the critical threshold + +For example : + + [git_commit_behind] + user root + + env.repo.munin_contrib.path /opt/munin-contrib + env.repo.munin_contrib.name Munin Contrib + + env.repo.other_repo.path /path/to/other-repo + env.repo.other_repo.name Other Repo + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + +1.0.0 + +=head1 AUTHOR + +Neraud (https://github.com/Neraud) + +=head1 LICENSE + +GPLv2 + +=cut""" + + +import logging +import os +from pathlib import Path +import pwd +from random import randint +import re +from shlex import quote +from subprocess import check_output, call, DEVNULL, CalledProcessError +import sys +import time + + +plugin_version = "1.0.0" + +if int(os.getenv('MUNIN_DEBUG', 0)) > 0: + logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)-7s %(message)s') + +current_user = pwd.getpwuid(os.geteuid())[0] + +conf = { + 'git_path': os.getenv('git_path', '/usr/bin/git'), + 'state_file': os.getenv('MUNIN_STATEFILE'), + 'update_mode': os.getenv('update.mode', 'munin'), + 'update_probability': int(os.getenv('update.probability', '12')), + 'update_maxinterval': int(os.getenv('update.maxinterval', '7200')) +} + +repo_codes = set(re.search('repo\.([^.]+)\..*', elem).group(1) + for elem in os.environ.keys() if elem.startswith('repo.')) + +repos_conf = {} +for code in repo_codes: + repos_conf[code] = { + 'name': os.getenv('repo.%s.name' % code, code), + 'path': os.getenv('repo.%s.path' % code, None), + 'user': os.getenv('repo.%s.user' % code, None), + 'warning': os.getenv('repo.%s.warning' % code, '10'), + 'critical': os.getenv('repo.%s.critical' % code, '100') + } + + +def print_config(): + print('graph_title Git repositories - Commits behind') + + print('graph_args --base 1000 -r --lower-limit 0') + print('graph_vlabel number of commits behind') + print('graph_scale yes') + print('graph_info This graph shows the number of commits behind' + + ' for each configured git repository') + print('graph_category file_transfer') + + print('graph_order %s' % ' '.join(repo_codes)) + + for repo_code in repos_conf.keys(): + print('%s.label %s' % (repo_code, repos_conf[repo_code]['name'])) + print('%s.warning %s' % (repo_code, repos_conf[repo_code]['warning'])) + print('%s.critical %s' % + (repo_code, repos_conf[repo_code]['critical'])) + + +def generate_git_command(repo_conf, git_command): + if not repo_conf['user'] or repo_conf['user'] == current_user: + cmd = [quote(conf['git_path'])] + git_command + else: + shell_cmd = 'cd %s ; %s %s' % ( + quote(repo_conf['path']), + quote(conf['git_path']), + ' '.join(git_command)) + cmd = ['su', '-', repo_conf['user'], '-s', '/bin/sh', '-c', shell_cmd] + return cmd + + +def execute_git_command(repo_conf, git_command): + cmd = generate_git_command(repo_conf, git_command) + return check_output(cmd, cwd=repo_conf['path']).decode('utf-8').rstrip() + + +def print_info(): + if not os.access(conf['git_path'], os.X_OK): + print('Git (%s) is missing, or not executable !' 
% + conf['git_path'], file=sys.stderr) + sys.exit(1) + + for repo_code in repos_conf.keys(): + logging.debug(' - %s' % repo_code) + try: + remote_branch = execute_git_command( + repos_conf[repo_code], + ['rev-parse', '--abbrev-ref', '--symbolic-full-name', '@{u}']) + logging.debug('remote_branch = %s' % remote_branch) + + commits_behind = execute_git_command( + repos_conf[repo_code], + ['rev-list', 'HEAD..%s' % remote_branch, '--count']) + + print('%s.value %d' % (repo_code, int(commits_behind))) + except CalledProcessError as e: + logging.error('Error executing git command : %s', e) + except FileNotFoundError as e: + logging.error('Repo not found at path %s' % + repos_conf[repo_code]['path']) + + +def check_update_repos(mode): + if not conf['state_file']: + logging.error('Munin state file unavailable') + sys.exit(1) + + if mode != conf['update_mode']: + logging.debug('Wrong mode, skipping') + return + + if not os.path.isfile(conf['state_file']): + logging.debug('No state file -> updating') + do_update_repos() + elif (os.path.getmtime(conf['state_file']) + conf['update_maxinterval'] + < time.time()): + logging.debug('State file last modified too long ago -> updating') + do_update_repos() + elif randint(1, conf['update_probability']) == 1: + logging.debug('Recent state, but random matched -> updating') + do_update_repos() + else: + logging.debug('Recent state and random missed -> skipping') + + +def do_update_repos(): + for repo_code in repos_conf.keys(): + try: + logging.info('Fetching repo %s' % repo_code) + execute_git_command(repos_conf[repo_code], ['fetch']) + except CalledProcessError as e: + logging.error('Error executing git command : %s', e) + except FileNotFoundError as e: + logging.error('Repo not found at path %s' % + repos_conf[repo_code]['path']) + logging.debug('Updating the state file') + + # 'touch' the state file to update its last modified date + Path(conf['state_file']).touch() + + +if len(sys.argv) > 1: + action = sys.argv[1] + if action == 'config': + print_config() + elif action == 'autoconf': + errors = [] + + if not conf['state_file']: + errors.append('munin state file unavailable') + + if os.access(conf['git_path'], os.X_OK): + test_git = call([conf['git_path'], '--version'], stdout=DEVNULL) + if test_git != 0: + errors.append('git seems to be broken ?!') + else: + errors.append('git is missing or not executable') + + if errors: + print('no (%s)' % ', '.join(errors)) + else: + print('yes') + elif action == 'version': + print('Git commit behind Munin plugin, version {0}'.format( + plugin_version)) + elif action == 'update': + check_update_repos('cron') + else: + logging.warn("Unknown argument '%s'" % action) + sys.exit(1) +else: + if conf['update_mode'] == 'munin': + check_update_repos('munin') + print_info() diff --git a/plugins/git/gitlab_statistics b/plugins/git/gitlab_statistics new file mode 100755 index 00000000..7a76bf97 --- /dev/null +++ b/plugins/git/gitlab_statistics @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# -*- python -*- + +""" +=head1 INTRODUCTION + +Plugin to monitor Gitlab status + +=head1 INSTALLATION + +Usage: Place in /etc/munin/plugins/ (or link it there using ln -s) + +=head1 CONFIGURATION + +Add this to your /etc/munin/plugin-conf.d/munin-node: + +=over 2 + + [gitlab_statistics] + env.logarithmic 1 + env.hostname gitlab.example.com # required + env.token YourPrivateTokenHere # required + +=back + +=head1 HISTORY + +2019-10-02: v 1.0 pcy : created + +=head1 USAGE + +Parameters understood: + + config (required) + autoconf (optional - used by 
munin-config) + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf +""" + + +import os +import json +import urllib +import sys + + +def weakbool(x): + return x.lower().strip() in {"true", "yes", "y", "1"} + + +url = None +if 'hostname' in os.environ and 'token' in os.environ: + url = "https://" + os.getenv('hostname') \ + + "/api/v4/application/statistics?private_token=" \ + + os.getenv('token') + +logarithmic = weakbool(os.getenv('logarithmic', 'N')) + + +def reqjson(): + try: + raw_data = urllib.request.urlopen(url) + return json.loads(raw_data) + except IOError: + print("Cannot reach the GitLab API endpoint.", file=sys.stderr) + exit(1) + + +def autoconf(): + if 'hostname' not in os.environ: + print("no ('hostname' envvar not set)") + elif 'token' not in os.environ: + print("no ('token' envvar not set)") + else: + print("yes") + + +def config(): + print("""\ +graph_title GitLab statistics +graph_vlabel amount +graph_category devel""") + if logarithmic: + print("graph_args --logarithmic") + + for x in reqjson().keys(): + print(x + ".label " + x) + + +def fetch(): + rj = reqjson() + for (x, y) in rj.items(): + print("%s.value %d" % (x, int(y.replace(',', '')))) + + +if len(sys.argv) >= 2: + if sys.argv[1] == 'autoconf': + autoconf() + elif sys.argv[1] == 'config': + config() + else: + fetch() +else: + fetch() diff --git a/plugins/glance/glance_size_ b/plugins/glance/glance_size_ index c45455d6..4b3ba99f 100755 --- a/plugins/glance/glance_size_ +++ b/plugins/glance/glance_size_ @@ -13,24 +13,22 @@ # To show tenant name plugin must run as root # # Magic markers -#%# capabilities=autoconf suggest -#%# family=auto +# #%# capabilities=autoconf suggest +# #%# family=auto import sys import os -try: +try: from sqlalchemy.orm import joinedload import sqlalchemy.exc - + from glance.common.cfg import CommonConfigOpts from glance.registry.db import models from glance.registry.db.api import get_session, configure_db from keystone.common import utils - from keystone import config - from keystone import exception - from keystone import identity + from keystone import config, exception, identity except ImportError: successful_import = False @@ -40,7 +38,7 @@ else: def get_name_from_tenant(tenant): try: - KEYSTONE_CONF = config.CONF(config_files=[utils.find_config('keystone.conf')]) + config.CONF(config_files=[utils.find_config('keystone.conf')]) except: # keystone configuration can not be loaded, use id as name" return tenant @@ -53,65 +51,67 @@ def get_name_from_tenant(tenant): # keystone database can not be connected, use id as name" return tenant - if not tenant_info: + if not tenant_info: return tenant else: return tenant_info["name"] - + def load_conf(): - CONF = CommonConfigOpts(project="glance", prog="glance-registry") - CONF() + loaded_config = CommonConfigOpts(project="glance", prog="glance-registry") + loaded_config() # Hide missing logger warning message sys.stderr = open(os.devnull, 'w') - configure_db(CONF) + configure_db(loaded_config) sys.stderr = sys.__stderr__ def print_config(tenant): if tenant == "Global": - print 'graph_title Glance used size for all tenants' - print 'graph_info This graph shows the used size in glance for all tenants' + print('graph_title Glance used size for all tenants') + print('graph_info This graph shows the used size in glance for all tenants') else: - print 'graph_title Glance used size for tenant %s' % get_name_from_tenant(tenant) - print 'graph_info This graph shows the used size in glance for tenant %s' % tenant - print 'graph_vlabel Bytes' 
- print 'graph_args --base 1024 --lower-limit 0' - print 'graph_category cloud' - print '%s.label %s' % (tenant, get_name_from_tenant(tenant)) - print '%s.draw LINE2' % tenant - print '%s.info %s MBytes' % (tenant, tenant) + print('graph_title Glance used size for tenant %s' % get_name_from_tenant(tenant)) + print('graph_info This graph shows the used size in glance for tenant %s' % tenant) + print('graph_vlabel Bytes') + print('graph_args --base 1024 --lower-limit 0') + print('graph_category cloud') + print('%s.label %s' % (tenant, get_name_from_tenant(tenant))) + print('%s.draw LINE2' % tenant) + print('%s.info %s MBytes' % (tenant, tenant)) + def request(**kwargs): session = get_session() try: - query = session.query(models.Image).\ - options(joinedload(models.Image.properties)).\ - options(joinedload(models.Image.members)) + query = session.query(models.Image).options(joinedload(models.Image.properties)).options( + joinedload(models.Image.members)) if kwargs: - query = query.filter_by(**kwargs) + query = query.filter_by(**kwargs) images = query.all() - except exc.NoResultFound: + except exception.NoResultFound: return [] return images + def print_suggest(): - print "Global" - print "\n".join(set( image["owner"] for image in request(deleted=False) )) + print("Global") + print("\n".join(set(image["owner"] for image in request(deleted=False)))) + def print_values(tenant): - if tenant != "Global" : + if tenant != "Global": images = request(deleted=False, owner=tenant) else: images = request(deleted=False) - total_size = sum([ image["size"] for image in images ]) - print '%s.value %s' % (tenant, total_size) + total_size = sum([image["size"] for image in images]) + print('%s.value %s' % (tenant, total_size)) if __name__ == '__main__': @@ -125,18 +125,17 @@ if __name__ == '__main__': load_conf() print_suggest() elif argv[1] == 'autoconf': - if not successful_import: - print 'no (failed import glance and/or sqlachemy module)' + if not successful_import: + print('no (failed import glance and/or sqlachemy module)') sys.exit(0) try: load_conf() get_session() except: - print 'no (failed to connect glance backend, check user)' + print('no (failed to connect glance backend, check user)') sys.exit(0) - print 'yes' + print('yes') elif successful_import: load_conf() print_values(tenant) - diff --git a/plugins/glance/glance_status b/plugins/glance/glance_status index 95917a3c..26a83e36 100755 --- a/plugins/glance/glance_status +++ b/plugins/glance/glance_status @@ -18,9 +18,9 @@ import sys import os -try: +try: from sqlalchemy.orm import exc, joinedload - + from glance.common.cfg import CommonConfigOpts from glance.registry.db import models from glance.registry.db.api import get_session, configure_db @@ -32,7 +32,7 @@ else: def load_conf(): CONF = CommonConfigOpts(project="glance", prog="glance-registry") CONF() - + # Hide missing logger warning message sys.stderr = open(os.devnull, 'w') configure_db(CONF) @@ -76,7 +76,7 @@ def print_values(): for image in images: n_image_by_status[image["status"]] = n_image_by_status.get(image["status"], 0) + 1 - for status in possible_status: + for status in possible_status: print '%s.value %s' % (status, n_image_by_status.get(status, 0)) if __name__ == '__main__': @@ -86,7 +86,7 @@ if __name__ == '__main__': if argv[1] == 'config': print_config() elif argv[1] == 'autoconf': - if not successful_import: + if not successful_import: print 'no (failed import glance and/or sqlachemy module)' sys.exit(0) try: diff --git a/plugins/glassfish/glassfish_counters_ 
b/plugins/glassfish/glassfish_counters_ old mode 100644 new mode 100755 diff --git a/plugins/google/google-rank b/plugins/google/google-rank index 00cafdba..e941940f 100755 --- a/plugins/google/google-rank +++ b/plugins/google/google-rank @@ -2,10 +2,10 @@ # Simple munin plugin to find the google rank for a URL/WORD combination # # THIS SCRIPT BREAKS THE TOS OF GOOGLE SO USE WITH CARE AND DON'T BLAME ME IF THINGS GO WRONG -# +# # (c) 2009 i.dobson@planet-ian.com # -# For each url/words that you want to watch you need to create a variable/word pair in your +# For each url/words that you want to watch you need to create a variable/word pair in your # munin-node configuration file for example # #[google_rank] @@ -19,18 +19,18 @@ # Version 0.5 24.1.2009 # Added loop to check the first 500 pages. Note the script sleeps 5 seconds between each page grab so # If the word/url your looking for is in the higher positions then you need to increase the timeout -# +# # Version 0.5 21.1.2009 -# Dump each page grabbed from google into separate files (helps with debugging) +# Dump each page grabbed from google into separate files (helps with debugging) # # Version 0.4 19.1.2009 # Fixed corrupt then empty cache file bug # -# Version 0.3 19.1.2009 +# Version 0.3 19.1.2009 # The script now grabs the google page based on the LASTHIT counter. # The script grabs the google page for URL1, then the next time it's called URL2 etc. If the url/word pair doesn't exist for LASTHIT then the script just dumps the cached data # -# Version 0.2 18.01.2009 +# Version 0.2 18.01.2009 # Cache added, the script only grabs the pages from google every 10 calls # The script still only checks to first 100 pages returned by google # @@ -40,14 +40,12 @@ # Auto Configure, Check it word 1 is defined if [ "$1" = "autoconf" ]; then - if [ "$URL1" != "" ]; then - if [ "$WORD1" != "" ]; then - echo yes - exit 0 - fi + if [ -n "$URL1" ] && [ -n "$WORD1" ]; then + echo yes + else + echo no fi - echo no - exit 1 + exit 0 fi #Configure, loop through each variable defined WORDx URLx dumping it to munin @@ -70,7 +68,7 @@ if [ "$1" = "config" ]; then fi if [ "$WORD" = "" ]; then exit 0 - fi + fi VAR=`echo $URL.$WORD | sed -e "s/http:\/\///g"| sed -e "s/ /_/g"| sed -e "s/\./_/g"| sed -e "s/\-/_/g"` URL=`echo $URL| sed -e "s/http:\/\///g"` echo $VAR.label Pagerank $URL - $WORD @@ -108,7 +106,7 @@ if [ "$URL" != "" ]; then SEARCHWORD=`echo $WORD| sed -e "s/ /%20/g"` until [ "$FOUND" -ne "0" ]; do -#Grab page from google for the WORD/PAGE combination.Pipe it into awk to pull out the url's only, one per line. Then dump only the lines containing the URL defined +#Grab page from google for the WORD/PAGE combination.Pipe it into awk to pull out the url's only, one per line. 
Then dump only the lines containing the URL defined wget -q --user-agent=Firefox -O - http://www.google.com/search?q=$SEARCHWORD\&num=100\&hl=en\&safe=off\&pwst=1\&start=$start\&sa=N > /tmp/google_rank.$LASTHIT.data VALUE=`cat /tmp/google_rank.$LASTHIT.data|sed 's//\n\1\n/g'|awk -v num=$num -v base=$base '{ if ( $1 ~ /^http/ ) print base,num++,$NF }'|awk '{ print $2 " " $3}'|grep -i $URL| awk '{ print $1}'` VALUE=`echo $VALUE| awk '{ print $1}'` @@ -117,7 +115,7 @@ until [ "$FOUND" -ne "0" ]; do let start="start + 100" sleep 5 else - FOUND=1 + FOUND=1 let VALUE="$VALUE + $start" fi ### echo Start=$start Value=$VALUE Found=$FOUND @@ -139,12 +137,12 @@ done #write data back rm /tmp/google_rank.cache - for iLoop in `seq 1 10`; do + for iLoop in `seq 1 10`; do echo ${Data[$iLoop]} >> /tmp/google_rank.cache done fi -#Reset counter to start +#Reset counter to start if [ "$LASTHIT" -gt 30 ]; then echo 0 > /tmp/google_rank.status fi diff --git a/plugins/google/googlecode b/plugins/google/googlecode index 02d24f5a..50687f90 100755 --- a/plugins/google/googlecode +++ b/plugins/google/googlecode @@ -12,7 +12,7 @@ # (at your option) any later version. # # This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of +# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # @@ -42,12 +42,12 @@ if [ "$1" = "autoconf" ]; then exit 0 fi -# Config. +# Config. if [ "$1" = "config" ]; then echo "graph_title Number of downloads of $PROJECTNAME from Google Code " echo "graph_args --base 1000 --lower-limit 0" echo "graph_vlabel number of downloads" - echo "graph_category other" + echo "graph_category filetransfer" echo "graph_info This graph shows the number of downloads of $PROJECTNAME from Google Code." j=0 for (( i = 1 ; i < $Nfiles ; i=i+5 )) @@ -71,7 +71,7 @@ if [ "$1" = "config" ]; then exit 0 fi -# Pring number of downloads. +# Print number of downloads. j=0 for (( i = 1 ; i < $Nfiles ; i=i+5 )) diff --git a/plugins/gpu/amd_gpu_ b/plugins/gpu/amd_gpu_ index b2fd4500..efdff9dc 100755 --- a/plugins/gpu/amd_gpu_ +++ b/plugins/gpu/amd_gpu_ @@ -8,13 +8,13 @@ amd_gpu_ - Wildcard plugin to monitor AMD GPUs. Uses aticonfig utility, usually bundled with AMD GPU driver, to obtain information. To use this plugin you have to make sure aticonfig will run without an active X -server (i.e. without anyone being logged in via the GUI). For more -information about this issue visit the link below: +server (i.e. without anyone being logged in via the GUI). For more +information about this issue visit the link below: http://www.mayankdaga.com/running-opencl-applications-remotely-on-amd-gpus/ =head1 CONFIGURATION -This is a wildcard plugin. The wildcard prefix link name should be the +This is a wildcard plugin. The wildcard prefix link name should be the value to monitor. 
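For example, a link named amd_gpu_temp reports temperatures, amd_gpu_fan reports fan
speeds, and so on. A minimal sketch of how the wildcard name is resolved (the sed
pattern is illustrative, not necessarily the plugin's exact code):

    # Derive the monitored quantity from the symlink name.
    name=$(basename "$0" | sed 's/^amd_gpu_//')        # e.g. "temp", "fan", "load"
    # Locate aticonfig, optionally overridden via the aticonfexec environment variable.
    atiConfigExec=${aticonfexec:-'/usr/bin/aticonfig'}
    case $name in
        temp|clocks|fan|load|vcore) ;;                  # supported quantities
        *) echo "Can't run without a proper symlink. Exiting."; exit 1 ;;
    esac
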
This plugin uses the following configuration variables: @@ -54,7 +54,7 @@ faken@fakenmc.com =head1 LICENSE GNU General Public License, version 2 - http://www.gnu.org/licenses/gpl-2.0.html + http://www.gnu.org/licenses/gpl-2.0.html =head1 MAGIC MARKERS @@ -71,7 +71,7 @@ atiConfigExec=${aticonfexec:-'/usr/bin/aticonfig'} # Check if autoconf was requested if [ "$1" = "autoconf" ]; then - # Autoconf only returns yes if aticonfig exists and is executable + # Autoconf only returns yes if aticonfig exists and is executable if [ -x $atiConfigExec ]; then echo yes exit 0 @@ -109,20 +109,20 @@ if [ "$1" = "config" ]; then case $name in temp) echo 'graph_title GPU temperature' - echo 'graph_args -l 0 -u 120' + echo 'graph_args -l 20 -u 120' echo 'graph_vlabel Degrees (C)' echo 'graph_category sensors' echo "graph_info Temperature information for AMD GPUs" nGpusCounter=0 while [ $nGpusCounter -lt $nGpus ] do - gpuName=`echo "$nGpusOutput" | grep "* 0" | cut -f 1,3 --complement -d " "` + gpuName=`echo "$nGpusOutput" | grep "\ $nGpusCounter\.\ " | cut -f 3 -d "." | sed -r 's/^[0-9]+\ //'` echo "temp${nGpusCounter}.warning ${warning:-75}" echo "temp${nGpusCounter}.critical ${critical:-95}" echo "temp${nGpusCounter}.info Temperature information for $gpuName" echo "temp${nGpusCounter}.label Temperature ($gpuName)" : $(( nGpusCounter = $nGpusCounter + 1 )) - done + done ;; clocks) # First determine max clock for each GPU... @@ -138,18 +138,18 @@ if [ "$1" = "config" ]; then echo 'graph_title GPU clock' echo "graph_args -l 0 -u $maxclock" echo 'graph_vlabel MHz' - echo 'graph_category sensors' + echo 'graph_category htc' echo "graph_info Core and memory clock info for AMD GPUs" nGpusCounter=0 while [ $nGpusCounter -lt $nGpus ] do - gpuName=`echo "$nGpusOutput" | grep "* 0" | cut -f 1,3 --complement -d " "` + gpuName=`echo "$nGpusOutput" | grep "\ $nGpusCounter\.\ " | cut -f 3 -d "." | sed -r 's/^[0-9]+\ //'` echo "memclock${nGpusCounter}.info Memory clock information for $gpuName" echo "memclock${nGpusCounter}.label Memory clock ($gpuName)" echo "coreclock${nGpusCounter}.info Core clock information for $gpuName" echo "coreclock${nGpusCounter}.label Core clock ($gpuName)" : $(( nGpusCounter = $nGpusCounter + 1 )) - done + done ;; fan) echo 'graph_title GPU fan speed' @@ -160,26 +160,26 @@ if [ "$1" = "config" ]; then nGpusCounter=0 while [ $nGpusCounter -lt $nGpus ] do - gpuName=`echo "$nGpusOutput" | grep "* 0" | cut -f 1,3 --complement -d " "` + gpuName=`echo "$nGpusOutput" | grep "\ $nGpusCounter\.\ " | cut -f 3 -d "." | sed -r 's/^[0-9]+\ //'` echo "fan${nGpusCounter}.info Fan speed information for $gpuName" echo "fan${nGpusCounter}.label Fan speed ($gpuName)" : $(( nGpusCounter = $nGpusCounter + 1 )) - done + done ;; load) echo 'graph_title GPU load' echo 'graph_args -l 0 -u 100' echo 'graph_vlabel Percentage' - echo 'graph_category sensors' - echo "graph_info GPU load" + echo 'graph_category htc' + echo "graph_info GPU load" nGpusCounter=0 while [ $nGpusCounter -lt $nGpus ] do - gpuName=`echo "$nGpusOutput" | grep "* 0" | cut -f 1,3 --complement -d " "` + gpuName=`echo "$nGpusOutput" | grep "\ $nGpusCounter\.\ " | cut -f 3 -d "." 
| sed -r 's/^[0-9]+\ //'` echo "load${nGpusCounter}.info Load information for $gpuName" echo "load${nGpusCounter}.label Load ($gpuName)" : $(( nGpusCounter = $nGpusCounter + 1 )) - done + done ;; vcore) echo 'graph_title GPU core voltage' @@ -189,11 +189,11 @@ if [ "$1" = "config" ]; then nGpusCounter=0 while [ $nGpusCounter -lt $nGpus ] do - gpuName=`echo "$nGpusOutput" | grep "* 0" | cut -f 1,3 --complement -d " "` + gpuName=`echo "$nGpusOutput" | grep "\ $nGpusCounter\.\ " | cut -f 3 -d "." | sed -r 's/^[0-9]+\ //'` echo "vcore${nGpusCounter}.info Vcore information for $gpuName" echo "vcore${nGpusCounter}.label Core voltage ($gpuName)" : $(( nGpusCounter = $nGpusCounter + 1 )) - done + done ;; *) echo "Can't run without a proper symlink. Exiting." diff --git a/plugins/gpu/nvidia_gpu_ b/plugins/gpu/nvidia_gpu_ index a61f492b..839b4986 100755 --- a/plugins/gpu/nvidia_gpu_ +++ b/plugins/gpu/nvidia_gpu_ @@ -10,7 +10,7 @@ usually bundled with NVIDIA GPU driver, to obtain information. =head1 CONFIGURATION -This is a wildcard plugin. The wildcard prefix link name should be the +This is a wildcard plugin. The wildcard prefix link name should be the value to monitor. This plugin uses the following configuration variables: @@ -37,8 +37,7 @@ C =item * -Add support for specific professional GPU features such as number of compute -processes, clocks, power draw, utilization, and so on. +Add support for specific professional GPU features such as number of compute processes, clocks and so on. =item * @@ -54,7 +53,7 @@ faken@fakenmc.com =head1 LICENSE GNU General Public License, version 2 - http://www.gnu.org/licenses/gpl-2.0.html + http://www.gnu.org/licenses/gpl-2.0.html =head1 MAGIC MARKERS @@ -64,15 +63,15 @@ faken@fakenmc.com =cut # Determine name of parameter to monitor -name=`basename $0 | sed 's/^nvidia_gpu_//g'` +name=$(basename "$0" | sed 's/^nvidia_gpu_//g') # Get location of nvidia-smi executable or use default nvSmiExec=${smiexec:-'/usr/bin/nvidia-smi'} # Check if autoconf was requested if [ "$1" = "autoconf" ]; then - # Autoconf only returns yes if nvidia-smi exists and is executable - if [ -x $nvSmiExec ]; then + # Autoconf only returns yes if nvidia-smi exists and is executable + if [ -x "$nvSmiExec" ]; then echo yes exit 0 else @@ -87,81 +86,82 @@ if [ "$1" = "suggest" ]; then echo "mem" echo "fan" echo "power" + echo "utilization" exit 0 fi # Get number of GPUs -nGpusOutput=`$nvSmiExec -L` -nGpus=`echo "$nGpusOutput" | wc -l` -if [ $nGpus -eq 0 ]; then +nGpusOutput=$("$nvSmiExec" -L) +nGpus=$(echo "$nGpusOutput" | wc -l) +if [ "$nGpus" -eq 0 ]; then # Exit if no GPUs found echo "No NVIDIA GPUs detected. Exiting." 
exit 1 fi # Get full output from nvidia-smi -smiOutput=`$nvSmiExec -q` +smiOutput=$("$nvSmiExec" -q) # Check if config was requested if [ "$1" = "config" ]; then # Get driver version - driverVersion=`nvidia-smi -q | grep "Driver Version" | cut -d : -f 2 | tr -d ' '` + driverVersion=$(echo "$smiOutput" | grep "Driver Version" | cut -d : -f 2 | tr -d ' ') # Configure graph depending on what which quantity will be plotted case $name in temp) echo 'graph_title GPU temperature' echo 'graph_args -l 0 -u 120' - echo 'graph_vlabel Degrees (C)' + echo 'graph_vlabel degrees Celsius' echo 'graph_category sensors' echo "graph_info Temperature information for NVIDIA GPUs using driver version $driverVersion" nGpusCounter=0 - while [ $nGpusCounter -lt $nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - gpuName=`echo "$nGpusOutput" | sed -n $(( $nGpusCounter + 1 ))p | cut -d \( -f 1` - echo "temp${nGpusCounter}.warning ${warning:-75}" - echo "temp${nGpusCounter}.critical ${critical:-95}" - echo "temp${nGpusCounter}.info Temperature information for $gpuName" - : $(( nGpusCounter = $nGpusCounter + 1 )) - done + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) + echo "${name}${nGpusCounter}.warning ${warning:-75}" + echo "${name}${nGpusCounter}.critical ${critical:-95}" + echo "${name}${nGpusCounter}.info Temperature information for $gpuName" + : $((nGpusCounter=nGpusCounter+1)) + done ;; mem) # First determine total memory of each GPU... - gpusTotalMemOutput=`echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Total" | cut -d : -f 2 | tr -d ' '` + gpusTotalMemOutput=$(echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Total" | cut -d : -f 2 | tr -d ' ') gpusTotalMem='' nGpusCounter=0 - while [ $nGpusCounter -lt $nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - gpuName=`echo "$nGpusOutput" | sed -n $(( $nGpusCounter + 1 ))p | cut -d \( -f 1` - echo "mem${nGpusCounter}.info Memory information for $gpuName" - gpuMem=`echo "$gpusTotalMemOutput"| sed -n $(( $nGpusCounter + 1 ))p` + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) + echo "${name}${nGpusCounter}.info Memory information for $gpuName" + gpuMem=$(echo "$gpusTotalMemOutput"| sed -n $((nGpusCounter+1))p) gpusTotalMem="${gpusTotalMem}${gpuMem} for GPU ${nGpusCounter}" - : $(( nGpusCounter = $nGpusCounter + 1 )) - if [ $nGpusCounter -lt $nGpus ]; then + : $((nGpusCounter=nGpusCounter+1)) + if [ "$nGpusCounter" -lt "$nGpus" ]; then gpusTotalMem="${gpusTotalMem}, " fi done # ...then output config data. 
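        # Note: the memory graph below is fixed to a 0-100 scale because the fetch
        # section reports used FB memory as a percentage of each GPU's total
        # (usedMemGpu * 100 / totalMemGpu), so the scale is card-independent.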
echo 'graph_title GPU memory usage' echo 'graph_args -l 0 -u 100' - echo 'graph_vlabel Percentage' + echo 'graph_vlabel %' echo 'graph_category memory' echo "graph_info FB Memory usage for NVIDIA GPUs using driver version $driverVersion (total memory is $gpusTotalMem)" ;; fan) echo 'graph_title GPU fan speed' echo 'graph_args -l 0 -u 100' - echo 'graph_vlabel Percentage' + echo 'graph_vlabel %' echo 'graph_category sensors' echo "graph_info Fan speed of NVIDIA GPUs using driver version $driverVersion" nGpusCounter=0 - while [ $nGpusCounter -lt $nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - gpuName=`echo "$nGpusOutput" | sed -n $(( $nGpusCounter + 1 ))p | cut -d \( -f 1` - echo "fan${nGpusCounter}.info Fan information for $gpuName" - : $(( nGpusCounter = $nGpusCounter + 1 )) - done + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) + echo "${name}${nGpusCounter}.info Fan information for $gpuName" + : $((nGpusCounter=nGpusCounter+1)) + done ;; power) echo 'graph_title GPU power consumption' @@ -169,11 +169,25 @@ if [ "$1" = "config" ]; then echo 'graph_category sensors' echo "graph_info power consumption of NVIDIA GPUs using driver version $driverVersion" nGpusCounter=0 - while [ $nGpusCounter -lt $nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - gpuName=`echo "$nGpusOutput" | sed -n $(( $nGpusCounter + 1 ))p | cut -d \( -f 1` - echo "power${nGpusCounter}.info power consumption of $gpuName" - : $(( nGpusCounter = $nGpusCounter + 1 )) + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) + echo "${name}${nGpusCounter}.info power consumption of $gpuName" + : $((nGpusCounter=nGpusCounter+1)) + done + ;; + utilization) + echo 'graph_title GPU utilization' + echo 'graph_args -l 0 -u 100' + echo 'graph_vlabel %' + echo 'graph_category system' + echo "graph_info GPU utilization of NVIDIA GPUs using driver version $driverVersion" + nGpusCounter=0 + while [ $nGpusCounter -lt "$nGpus" ] + do + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) + echo "${name}${nGpusCounter}.info GPU utilization information for $gpuName" + : $((nGpusCounter=nGpusCounter+1)) done ;; *) @@ -185,11 +199,11 @@ if [ "$1" = "config" ]; then # Common stuff for all quantities nGpusCounter=0 - while [ $nGpusCounter -lt $nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - gpuName=`echo "$nGpusOutput" | sed -n $(( $nGpusCounter + 1 ))p | cut -d \( -f 1` + gpuName=$(echo "$nGpusOutput" | sed -n $((nGpusCounter+1))p | cut -d \( -f 1) echo "${name}${nGpusCounter}.label $gpuName" - : $(( nGpusCounter = $nGpusCounter + 1 )) + : $((nGpusCounter=nGpusCounter+1)) #print_warning $name #print_critical $name done @@ -200,27 +214,30 @@ fi # Get requested value case $name in temp) - valueGpus=`echo "$smiOutput" | grep -A 1 "Temperature" | grep -i "Gpu" | cut -d : -f 2 | cut -d ' ' -f 2` + valueGpus=$(echo "$smiOutput" | grep -A 1 "Temperature" | grep -i "Gpu" | cut -d : -f 2 | cut -d ' ' -f 2) ;; mem) - totalMemGpus=`echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Total" | cut -d : -f 2 | cut -d ' ' -f 2` - usedMemGpus=`echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Used" | cut -d : -f 2 | cut -d ' ' -f 2` + totalMemGpus=$(echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Total" | cut -d : -f 2 | cut -d ' ' -f 2) + usedMemGpus=$(echo "$smiOutput" | grep -v BAR1 | grep -A 3 "Memory Usage" | grep "Used" | cut -d : -f 2 | cut -d ' ' -f 2) valueGpus='' nGpusCounter=0 - while [ $nGpusCounter -lt 
$nGpus ] + while [ $nGpusCounter -lt "$nGpus" ] do - totalMemGpu=`echo "$totalMemGpus" | sed -n $(( $nGpusCounter + 1 ))p` - usedMemGpu=`echo "$usedMemGpus" | sed -n $(( $nGpusCounter + 1 ))p` - percentMemUsed=$(( $usedMemGpu * 100 / $totalMemGpu )) + totalMemGpu=$(echo "$totalMemGpus" | sed -n $((nGpusCounter+1))p) + usedMemGpu=$(echo "$usedMemGpus" | sed -n $((nGpusCounter+1))p) + percentMemUsed=$((usedMemGpu*100/totalMemGpu)) valueGpus="${valueGpus}${percentMemUsed}"$'\n' - : $(( nGpusCounter = $nGpusCounter + 1 )) + : $((nGpusCounter=nGpusCounter+1)) done ;; fan) - valueGpus=`echo "$smiOutput" | grep "Fan Speed" | cut -d ':' -f 2 | cut -d ' ' -f 2` + valueGpus=$(echo "$smiOutput" | grep "Fan Speed" | cut -d ':' -f 2 | cut -d ' ' -f 2) ;; power) - valueGpus=`echo "$smiOutput" | grep "Power Draw" | cut -d ':' -f 2 | cut -d ' ' -f 2` + valueGpus=$(echo "$smiOutput" | grep "Power Draw" | cut -d ':' -f 2 | cut -d ' ' -f 2) + ;; + utilization) + valueGpus=$(echo "$smiOutput" | grep "Gpu" | cut -d ':' -f 2 | cut -d ' ' -f 2) ;; *) echo "Can't run without a proper symlink. Exiting." @@ -232,12 +249,9 @@ case $name in # Print requested value nGpusCounter=0 -while [ $nGpusCounter -lt $nGpus ] +while [ $nGpusCounter -lt "$nGpus" ] do - value=`echo "$valueGpus" | sed -n $(( $nGpusCounter + 1 ))p` + value=$(echo "$valueGpus" | sed -n $((nGpusCounter+1))p) echo "${name}${nGpusCounter}.value $value" - : $(( nGpusCounter = $nGpusCounter + 1 )) + : $((nGpusCounter=nGpusCounter+1)) done - - - diff --git a/plugins/gpu/nvidia_smi_ b/plugins/gpu/nvidia_smi_ index f5395d5e..6f6c910d 100755 --- a/plugins/gpu/nvidia_smi_ +++ b/plugins/gpu/nvidia_smi_ @@ -17,7 +17,7 @@ # # THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -# WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
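With the new utilization mode added to nvidia_gpu_ above, a manual run of a symlink
named nvidia_gpu_utilization would look roughly like this (GPU name and driver
version are made up for illustration):

    $ munin-run nvidia_gpu_utilization config
    graph_title GPU utilization
    graph_args -l 0 -u 100
    graph_vlabel %
    graph_category system
    graph_info GPU utilization of NVIDIA GPUs using driver version 470.82.00
    utilization0.info GPU utilization information for GPU 0: GeForce GTX 1080
    utilization0.label GPU 0: GeForce GTX 1080
    $ munin-run nvidia_gpu_utilization
    utilization0.value 37
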
# # @@ -38,7 +38,7 @@ my $runType = "normal"; my @runTypes = qw( normal config autoconf ); if ($#ARGV + 1 == 1) { - if (grep $_ eq $ARGV[0], @runTypes) + if (grep $_ eq $ARGV[0], @runTypes) { $runType = $ARGV[0]; } @@ -63,10 +63,10 @@ for (my $i = 0; $i < $gpuCount; $i++) { ($ret, my $handle) = nvmlDeviceGetHandleByIndex($i); next if $ret != $NVML_SUCCESS; - + ($ret, my $pciInfo) = nvmlDeviceGetPciInfo($handle); my $gpuName = $pciInfo->{'busId'} if $ret == $NVML_SUCCESS; - + if ($runType eq "config") { # only print the graph information once @@ -78,7 +78,7 @@ for (my $i = 0; $i < $gpuCount; $i++) print "graph_category sensors\n"; print "graph_info Information for NVIDIA GPUs using driver version $driverVersion\n"; } - + # metrics are collected for all the GPUs to a single graph print "GPU_UTIL_$i.label GPU$i - $gpuName : GPU utilization\n"; print "GPU_FANSPEED_$i.label GPU$i - $gpuName : fan speed\n"; diff --git a/plugins/gpu/nvidia_smi_.nocheck b/plugins/gpu/nvidia_smi_.nocheck new file mode 100644 index 00000000..e69de29b diff --git a/plugins/groupwise/snmp__gwia_bytes_ b/plugins/groupwise/snmp__gwia_bytes_ index 8f2f3782..07f74593 100755 --- a/plugins/groupwise/snmp__gwia_bytes_ +++ b/plugins/groupwise/snmp__gwia_bytes_ @@ -23,8 +23,8 @@ # ------------------------------------------------------------ # Plugin to monitor Novell Groupwise Internet Agent (GWIA) # ------------------------------------------------------------ -# -# Management Information Base (MIB) GWIAMIB +# +# Management Information Base (MIB) GWIAMIB # # Naming Tree: 1.3.6.1.4.1.23 # iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) novell(23) @@ -32,7 +32,7 @@ # To see all values available for your GWIA, type # snmpwalk -v1 -c public -m GWIAMIB gwia # -# This plugin fetches: +# This plugin fetches: # # * gwiaGatewayName - 1.3.6.1.4.1.23.2.70.1.1. # * gwiaStatBytesIn - 1.3.6.1.4.1.23.2.70.1.6. @@ -40,31 +40,31 @@ # # Usage: # -------------- -# Link this file snmp__gwia_bytes_ to your nodes servicedir [/etc/munin/plugins] +# Link this file snmp__gwia_bytes_ to your nodes servicedir [/etc/munin/plugins] # -# as: -# snmp__gwia_bytes_ +# as: +# snmp__gwia_bytes_ # # with: -# = Name or IP-Number of host +# = Name or IP-Number of host # = table index of the GWIA Object -# +# # E.g. # ln -s /usr/share/munin/plugins/snmp__gwia_bytes_ \ # /etc/munin/plugins/snmp_foo.example.com_gwia_bytes_0 -# ...will monitor a single GWIA object on host foo.example.com. +# ...will monitor a single GWIA object on host foo.example.com. # # Parameters -# community - Specify wich community string to use (Default: public) +# community - Specify which community string to use (Default: public) # port - Specify which port to read from (Default: 161) # host - Specify which host to monitor (Default: Read from link in servicedir) -# pos - Specify which table Object to read (Default: Read from link in servicedir, -# +# pos - Specify which table Object to read (Default: Read from link in servicedir, +# # You may adjust settings to your need via configuration in plugin-conf.d/munin-node: # [snmp_*_gwia_bytes_*] # env.port # env.community -# env.pos +# env.pos # env.host # # Parameters can also be specified on a per GWIA basis, eg: @@ -96,8 +96,8 @@ my $GRAPH_VLABEL = "bytes per $GRAPH_PERIOD in(-) / out(+)"; my $BYTES_LABEL='Bytes'; if (defined $ARGV[0] and $ARGV[0] eq "snmpconf") -{ - print "require 1.3.6.1.4.1.23.2.70.1.1. [.*]\n"; # gwiaGatewayName +{ + print "require 1.3.6.1.4.1.23.2.70.1.1. 
[.*]\n"; # gwiaGatewayName print "require 1.3.6.1.4.1.23.2.70.1.6. [\\d*]\n"; # gwiaStatBytesIn print "require 1.3.6.1.4.1.23.2.70.1.5. [\\d*]\n"; # gwiaStatBytesOut @@ -129,8 +129,8 @@ if (!defined ($session)) if (defined $ARGV[0] and $ARGV[0] eq "config") { - # get name of Internet Agent - my $gwname = &get_single ($session, "1.3.6.1.4.1.23.2.70.1.1.$pos"); # gwiaGatewayName + # get name of Internet Agent + my $gwname = &get_single ($session, "1.3.6.1.4.1.23.2.70.1.1.$pos"); # gwiaGatewayName # output to munin print "host_name $host graph_category mail diff --git a/plugins/groupwise/snmp__gwia_msgs_ b/plugins/groupwise/snmp__gwia_msgs_ index 4fb7d9ec..f1c1a9fe 100755 --- a/plugins/groupwise/snmp__gwia_msgs_ +++ b/plugins/groupwise/snmp__gwia_msgs_ @@ -23,8 +23,8 @@ # ------------------------------------------------------------ # Plugin to monitor Novell Groupwise Internet Agent (GWIA) # ------------------------------------------------------------ -# -# Management Information Base (MIB) GWIAMIB +# +# Management Information Base (MIB) GWIAMIB # # Naming Tree: 1.3.6.1.4.1.23 # iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) novell(23) @@ -32,43 +32,43 @@ # To see all values available for your GWIA, type # snmpwalk -v1 -c public -m GWIAMIB gwia # -# This plugin fetches: +# This plugin fetches: # # * gwiaGatewayName - 1.3.6.1.4.1.23.2.70.1.1. # * gwiaStatMsgsOut - 1.3.6.1.4.1.23.2.70.1.7. # * gwiaStatMsgsIn - 1.3.6.1.4.1.23.2.70.1.8. # * gwiaStatStatusesOut - 1.3.6.1.4.1.23.2.70.1.9. -# * gwiaStatStatusesIn - 1.3.6.1.4.1.23.2.70.1.10. -# * gwiaStatErrorsOut - 1.3.6.1.4.1.23.2.70.1.11. +# * gwiaStatStatusesIn - 1.3.6.1.4.1.23.2.70.1.10. +# * gwiaStatErrorsOut - 1.3.6.1.4.1.23.2.70.1.11. # * gwiaStatErrorsIn - 1.3.6.1.4.1.23.2.70.1.12. # # Usage: # -------------- -# Link this file snmp__gwia_msgs_ to your nodes servicedir [/etc/munin/plugins] +# Link this file snmp__gwia_msgs_ to your nodes servicedir [/etc/munin/plugins] # -# as: -# snmp__gwia_msgs_ +# as: +# snmp__gwia_msgs_ # # with: -# = Name or IP-Number of host +# = Name or IP-Number of host # = table index of the GWIA Object -# +# # E.g. # ln -s /usr/share/munin/plugins/snmp__gwia_msgs_ \ # /etc/munin/plugins/snmp_foo.example.com_gwia_msgs_0 -# ...will monitor a single GWIA object on host foo.example.com. +# ...will monitor a single GWIA object on host foo.example.com. # # Parameters -# community - Specify wich community string to use (Default: public) +# community - Specify which community string to use (Default: public) # port - Specify which port to read from (Default: 161) # host - Specify which host to monitor (Default: Read from link in servicedir) -# pos - Specify which table Object to read (Default: Read from link in servicedir, -# +# pos - Specify which table Object to read (Default: Read from link in servicedir, +# # You may adjust settings to your need via configuration in plugin-conf.d/munin-node: # [snmp_*_gwia_msgs_*] # env.port # env.community -# env.pos +# env.pos # env.host # # Parameters can also be specified on a per GWIA basis, eg: @@ -104,8 +104,8 @@ my $ERRORS_LABEL='Errors'; my $ERRORS_CRITICAL=10; if (defined $ARGV[0] and $ARGV[0] eq "snmpconf") -{ - print "require 1.3.6.1.4.1.23.2.70.1.1. [.*]\n"; # gwiaGatewayName +{ + print "require 1.3.6.1.4.1.23.2.70.1.1. [.*]\n"; # gwiaGatewayName print "require 1.3.6.1.4.1.23.2.70.1.7. [\\d*]\n"; # gwiaStatMsgsOut print "require 1.3.6.1.4.1.23.2.70.1.8. [\\d*]\n"; # gwiaStatMsgsIn print "require 1.3.6.1.4.1.23.2.70.1.11. 
[\\d*]\n"; # gwiaStatErrorsOut @@ -141,8 +141,8 @@ if (!defined ($session)) if (defined $ARGV[0] and $ARGV[0] eq "config") { - # get name of Internet Agent - my $gwname = &get_single ($session, "1.3.6.1.4.1.23.2.70.1.1.$pos"); # gwiaGatewayName + # get name of Internet Agent + my $gwname = &get_single ($session, "1.3.6.1.4.1.23.2.70.1.1.$pos"); # gwiaGatewayName # output to munin print "host_name $host diff --git a/plugins/groupwise/snmp__gwmta_msgs_ b/plugins/groupwise/snmp__gwmta_msgs_ index ee74f06a..5f74ae52 100755 --- a/plugins/groupwise/snmp__gwmta_msgs_ +++ b/plugins/groupwise/snmp__gwmta_msgs_ @@ -23,8 +23,8 @@ # ------------------------------------------------------------ # Plugin to monitor Novell Groupwise MTA (GWMTA) # ------------------------------------------------------------ -# -# Management Information Base (MIB) GWMTA-MIB +# +# Management Information Base (MIB) GWMTA-MIB # # Naming Tree: 1.3.6.1.4.1.23 # iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) novell(23) @@ -32,40 +32,40 @@ # To see all values available for your GWMTA, type # snmpwalk -v1 -c public -m GWMTA-MIB gwmta # -# This plugin fetches: +# This plugin fetches: # -# * mtaDomainName 1.3.6.1.4.1.23.2.37.1.1.1.2. -# * mtaTenMinuteRoutedMsgs - 1.3.6.1.4.1.23.2.37.1.1.1.10. +# * mtaDomainName 1.3.6.1.4.1.23.2.37.1.1.1.2. +# * mtaTenMinuteRoutedMsgs - 1.3.6.1.4.1.23.2.37.1.1.1.10. # * mtaTenMinuteUndeliverableMsgs - 1.3.6.1.4.1.23.2.37.1.1.1.12. # * mtaTenMinuteErrorMsgs - 1.3.6.1.4.1.23.2.37.1.1.1.14. # # Usage: # -------------- -# Link this file snmp__gwmta_msgs_ to your nodes servicedir [/etc/munin/plugins] +# Link this file snmp__gwmta_msgs_ to your nodes servicedir [/etc/munin/plugins] # -# as: -# snmp__gwmta_msgs_ +# as: +# snmp__gwmta_msgs_ # # with: -# = Name or IP-Number of host +# = Name or IP-Number of host # = table index of the GWMTA Object -# +# # E.g. # ln -s /usr/share/munin/plugins/snmp__gwmta_msgs_ \ # /etc/munin/plugins/snmp_foo.example.com_gwmta_msgs_0 -# ...will monitor a single GWMTA object on host foo.example.com. +# ...will monitor a single GWMTA object on host foo.example.com. # # Parameters -# community - Specify wich community string to use (Default: public) +# community - Specify which community string to use (Default: public) # port - Specify which port to read from (Default: 161) # host - Specify which host to monitor (Default: Read from link in servicedir) -# pos - Specify which table Object to read (Default: Read from link in servicedir, -# +# pos - Specify which table Object to read (Default: Read from link in servicedir, +# # You may adjust settings to your need via configuration in plugin-conf.d/munin-node: # [snmp_*_gwmta_msgs_*] # env.port # env.community -# env.pos +# env.pos # env.host # # Parameters can also be specified on a per GWMTA basis, eg: @@ -104,8 +104,8 @@ my $ERRORS_CRITICAL=10; if (defined $ARGV[0] and $ARGV[0] eq "snmpconf") { - print "index 1.3.6.1.4.1.23.2.37.1.1.1.1.\n"; # mtaIndex - print "require 1.3.6.1.4.1.23.2.37.1.1.1.2. \n"; # mtaDomainName + print "index 1.3.6.1.4.1.23.2.37.1.1.1.1.\n"; # mtaIndex + print "require 1.3.6.1.4.1.23.2.37.1.1.1.2. \n"; # mtaDomainName print "require 1.3.6.1.4.1.23.2.37.1.1.1.10. [\\d+]\n"; # mtaTenMinuteRoutedMsgs print "require 1.3.6.1.4.1.23.2.37.1.1.1.12. [\\d+]\n"; # mtaTenMinuteUndeliverableMsgs print "require 1.3.6.1.4.1.23.2.37.1.1.1.14. 
[\\d+]\n"; # mtaTenMinuteErrorMsgs @@ -137,20 +137,20 @@ if (!defined ($session)) if (defined $ARGV[0] and $ARGV[0] eq "config") { - # get name of domain - my $domain = &get_single ($session, "1.3.6.1.4.1.23.2.37.1.1.1.2.$pos"); # mtaDomainName + # get name of domain + my $domain = &get_single ($session, "1.3.6.1.4.1.23.2.37.1.1.1.2.$pos"); # mtaDomainName # output to munin print "host_name $host graph_category mail graph_args --base 1000 graph_period $GRAPH_PERIOD -graph_title GWMTA load ($domain) -graph_info Monitors status of Groupwise MTA, here: $domain. It reports values for the last 10 minutes. +graph_title GWMTA load ($domain) +graph_info Monitors status of Groupwise MTA, here: $domain. It reports values for the last 10 minutes. graph_vlabel $GRAPH_VLABEL graph_args -l 0 routed.label $ROUTED_LABEL -routed.info mtaTenMinuteRoutedMsgs (1.3.6.1.4.1.23.2.37.1.1.1.10.) +routed.info mtaTenMinuteRoutedMsgs (1.3.6.1.4.1.23.2.37.1.1.1.10.) routed.critical $ROUTED_CRITICAL routed.type GAUGE routed.min 0 diff --git a/plugins/groupwise/snmp__gwpoa_ b/plugins/groupwise/snmp__gwpoa_ index 78051b3f..304cc272 100755 --- a/plugins/groupwise/snmp__gwpoa_ +++ b/plugins/groupwise/snmp__gwpoa_ @@ -23,8 +23,8 @@ # ------------------------------------------------------------ # Plugin to monitor Novell Groupwise Post Office Agent (POA) # ------------------------------------------------------------ -# -# Management Information Base (MIB) GWPOA +# +# Management Information Base (MIB) GWPOA # # Naming Tree: 1.3.6.1.4.1.23 # iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) novell(23) @@ -32,7 +32,7 @@ # To see all values available for your GWPOA, type # snmpwalk -v1 -c public -m GWPOA-MIB gwpoa # -# This plugin fetches: +# This plugin fetches: # # * poaPostOfficeName - 1.3.6.1.4.1.23.2.38.1.1.1.2. # * poaTotalMsgs - 1.3.6.1.4.1.23.2.38.1.1.1.3. @@ -46,10 +46,10 @@ # with the hostname (host) and the table index (pos) of the POA-Object # defined in the linkage. # -# snmp__gwpoa_ +# snmp__gwpoa_ # # with: -# = Name or IP-Number of host +# = Name or IP-Number of host # = Table index of the POA Object # # @@ -57,22 +57,22 @@ # ln -s /usr/share/munin/plugins/snmp__gwpoa_ \ # /etc/munin/plugins/snmp_foo.example.com_gwpoa_1 # -# ...will monitor the first POA-Object on host foo.example.com. +# ...will monitor the first POA-Object on host foo.example.com. 
# # # Configuration: # -------------- # Parameters -# community - Specify wich community string to use (Default: public) +# community - Specify which community string to use (Default: public) # port - Specify which port to read from (Default: 161) # host - Specify which host to monitor (Default: Read from link in servicedir) -# pos - Specify which table Object to read (Default: Read from link in servicedir, -# +# pos - Specify which table Object to read (Default: Read from link in servicedir, +# # You may adjust settings via configuration in plugin-conf.d/munin-node: # [snmp_*_gwpoa_*] # env.port # env.community -# env.pos +# env.pos # env.host # # Parameters can also be specified on a per POA basis, eg: @@ -103,15 +103,15 @@ my $response; my $GRAPH_PERIOD = "minute"; my $GRAPH_VLABEL = "messages per $GRAPH_PERIOD"; my $TOTAL_LABEL = "TotalMsgs"; -my $UNDELIVERABLE_LABEL = "UndeliverableMsgs"; +my $UNDELIVERABLE_LABEL = "UndeliverableMsgs"; my $PROBLEM_LABEL = "ProblemMsgs"; my $ERRORS_LABEL = "AdmErrorMsgs"; my $ERRORS_CRITICAL = 10; if (defined $ARGV[0] and $ARGV[0] eq "snmpconf") { - print "index 1.3.6.1.4.1.23.2.38.1.1.1.1.\n"; # gwpoa - print "require 1.3.6.1.4.1.23.2.38.1.1.1.2. [.*]\n"; # poaPostOfficeName + print "index 1.3.6.1.4.1.23.2.38.1.1.1.1.\n"; # gwpoa + print "require 1.3.6.1.4.1.23.2.38.1.1.1.2. [.*]\n"; # poaPostOfficeName print "require 1.3.6.1.4.1.23.2.38.1.1.1.3. [\\d*]\n"; # poaTotalMsgs print "require 1.3.6.1.4.1.23.2.38.1.1.1.4. [\\d*]\n"; # poaProblemMsgs print "require 1.3.6.1.4.1.23.2.38.1.1.1.8. [\\d*]\n"; # poaUndeliverableMsgs @@ -145,8 +145,8 @@ if (!defined ($session)) if (defined $ARGV[0] and $ARGV[0] eq "config") { - # get Post Office Name - my $poname = &get_single ($session, "1.3.6.1.4.1.23.2.38.1.1.1.2.$pos"); # poaPostOfficeName + # get Post Office Name + my $poname = &get_single ($session, "1.3.6.1.4.1.23.2.38.1.1.1.2.$pos"); # poaPostOfficeName # output to munin print "host_name $host diff --git a/plugins/gunicorn/gunicorn_memory_status b/plugins/gunicorn/gunicorn_memory_status index 630af52e..b8948e35 100755 --- a/plugins/gunicorn/gunicorn_memory_status +++ b/plugins/gunicorn/gunicorn_memory_status @@ -9,10 +9,10 @@ Like Munin, this plugin is licensed under the GNU GPL v2 license http://www.opensource.org/licenses/GPL-2.0 - If you've put your gunicorn pid somewhere other than the + If you've put your gunicorn pid somewhere other than the default /var/run/gunicorn.pid, you can add a section like this to your munin-node's plugin configuration: - + [gunicorn_*] env.gunicorn_pid_path [path to your gunicorn pid] @@ -50,7 +50,7 @@ class GunicornMemoryStatus(): def _get_master_pid(self): master_pid_file = open(GUNICORN_PID_PATH) self.master_pid = master_pid_file.read().rstrip() - master_pid_file.close() + master_pid_file.close() return True def _get_total_memory(self): @@ -58,7 +58,7 @@ class GunicornMemoryStatus(): total = master +self. 
_get_worker_memory() total_in_mb = total / 1024 return total_in_mb - + def _get_master_memory(self): master = int(check_output( ['ps', '--pid', self.master_pid, '-o', 'rss', '--no-headers'])) diff --git a/plugins/gunicorn/gunicorn_status b/plugins/gunicorn/gunicorn_status index 471b291d..1d19179a 100755 --- a/plugins/gunicorn/gunicorn_status +++ b/plugins/gunicorn/gunicorn_status @@ -8,10 +8,10 @@ Like Munin, this plugin is licensed under the GNU GPL v2 license http://www.opensource.org/licenses/GPL-2.0 - If you've put your gunicorn pid somewhere other than the + If you've put your gunicorn pid somewhere other than the default /var/run/gunicorn.pid, you can add a section like this to your munin-node's plugin configuration: - + [gunicorn_*] env.gunicorn_pid_path [path to your gunicorn pid] @@ -59,7 +59,7 @@ class GunicornStatus(): def _get_master_pid(self): master_pid_file = open(GUNICORN_PID_PATH) self.master_pid = master_pid_file.read().rstrip() - master_pid_file.close() + master_pid_file.close() def _get_worker_pids(self, master_pid): children = check_output( diff --git a/plugins/haproxy/haproxy-bytes b/plugins/haproxy/haproxy-bytes index 303a849e..3ad78c0c 100755 --- a/plugins/haproxy/haproxy-bytes +++ b/plugins/haproxy/haproxy-bytes @@ -30,7 +30,7 @@ graph_vlabel=${title} case $1 in config) cat <) { + while () { my $url=$_; chomp($url); my $id=get_id($url); @@ -150,7 +150,7 @@ sub read_cache{ my %cache=(); if(-r $file){ open(FILE,'<'.$file); - while () { + while () { m/^(\S*)\s+(.*)$/; $cache{ $1 } = $2; } @@ -297,7 +297,7 @@ sub loadtime_config{ print "graph_total Total\n"; print "graph_info This graph is generated by a set of serial GETs to calculate the total time to load $urls{$id}. "; print "Note that browsers usually fork() the GET requests, resulting in a shorter total loading time.\n"; - + if(keys(%cache)>0){ for my $key ( sort reverse keys %cache ){ my $value=$cache{$key}; @@ -496,7 +496,7 @@ sub cache_values{ my $value=$cache{$key}; if($key =~ m/^([A-Za-z]+)\_(\S+)$/){ my $name=$2; - + if ($1 eq $type){ $name=get_fieldname($name); print $name . ".value " . $value . "\n"; @@ -609,7 +609,7 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { exit(0); } elsif($ARGV[0] and $ARGV[0] eq "cron") { - # This thing is run by cron and should write a cache file for munin-node to + # This thing is run by cron and should write a cache file for munin-node to # read from my $verbose=0; @@ -629,7 +629,7 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { while ( my ($id, $url) = each(%urls) ) { $verbose && print "Fetching $url (id: $id)... \n"; - + $t0=0; $status=0; %output=(); @@ -682,7 +682,7 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { $tag=$$link[0] . " " . $$link[1]; } $output{"tags_" . $$link[0] . "-" . $$link[1]}+=1; - + if(filter($tag)){ $verbose && print " Processing: " . $$link[0] . " " . $$link[1] . " " . $$link[2] . "\n"; @@ -693,7 +693,7 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { } my $suburl=$$link[2]; - + $t0 = [gettimeofday]; $response = $browser->get($suburl); $output{"loadtime_" . $host} += sprintf("%.6f",tv_interval ( $t0, [gettimeofday])); @@ -720,13 +720,13 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { while ( my ($id, $value) = each(%input) ) { $input{$id}="U"; } - + # Adding new values while ( my ($id, $value) = each(%output) ) { $input{$id}=$value; $verbose && print " Result: " . $id . " -> " . $value . "\n"; } - + # Writing the cache $verbose && print "Writing cache file: " . $cachefile . "... 
"; open(FILE,">".$cachefile); @@ -739,7 +739,7 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { exit(0); }elsif($ARGV[0] and $ARGV[0] eq "config") { my %urls=&read_urls($url_file); - + $debug && print "Reading cache file\n"; my $cachefile=$cachedir . "/" . &get_cache_file_name($scriptname,$id); my %cache=read_cache($cachefile); @@ -773,6 +773,6 @@ if($ARGV[0] and $ARGV[0] eq "autoconf") { } else { cache_values(\%cache, $type); } -} +} # vim:syntax=perl diff --git a/plugins/http/http_request_time b/plugins/http/http_request_time index c8c4b9a0..cb8bebfc 100755 --- a/plugins/http/http_request_time +++ b/plugins/http/http_request_time @@ -4,7 +4,7 @@ This plugin does http requests to specified URLs and takes the response time. Use it to monitor remote sites. - + LWP::UserAgent and Time::HiRes are required =head1 CONFIGURATION @@ -16,6 +16,7 @@ env.url3_name some_munin_internal_name env.url3_label Some random page on our website env.url3_proxy http://firewall:3128 + env.url3_agent Mozilla/5.0 env.timeout 3 Timeout is the timeout of any HTTP request. Tune to avoid a complete @@ -70,11 +71,13 @@ for (my $i = 1; $ENV{"url$i"}; $i++) my $proxy = $ENV{"url${i}_proxy"}; my $name = $ENV{"url${i}_name"} || clean($url); my $label = $ENV{"url${i}_label"} || $url; - + my $agent = $ENV{"url${i}_agent"}; + $URLS{$name}={ url=>$url, proxy=>$proxy, label=>$label, + agent=>$agent, time=>'U' }; } @@ -90,7 +93,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) my $ua = LWP::UserAgent->new(timeout => $timeout); foreach my $url (keys %URLS) { - my $response = $ua->request(HTTP::Request->new('GET',$url)); + my $response = $ua->request(HTTP::Request->new('GET',$URLS{$url}{'url'})); if ($response->is_success) { next; } @@ -141,9 +144,15 @@ if ( defined $ARGV[0] and $ARGV[0] eq "config" ) } my $ua = LWP::UserAgent->new(timeout => $timeout); +my $defaultAgent = $ua->agent; foreach my $name (keys %URLS) { my $url = $URLS{$name}; + if ($url->{agent}) { + $ua->agent($url->{agent}); + } else { + $ua->agent($defaultAgent); + } if ($url->{proxy}) { $ua->proxy(['http', 'ftp'], $url->{proxy}); } @@ -161,7 +170,7 @@ foreach my $name (keys %URLS) { if ($response->is_success) { $$url{'time'}=sprintf("%d",tv_interval($t1,$t2)*1000); - }; + }; }; print("multigraph http_request_time\n"); diff --git a/plugins/http/mongrel_memory b/plugins/http/mongrel_memory index ab54e3f1..9e3b2798 100755 --- a/plugins/http/mongrel_memory +++ b/plugins/http/mongrel_memory @@ -1,89 +1,92 @@ #!/usr/bin/env ruby -# mongrel_memory - A munin plugin for OpenSolaris to monitor memory size of -# each individual mongrel process -# Copyright (C) 2009 Matthias Marschall - mm@agileweboperations.com -# -# Based on: -# mongrel_process_memory - A munin plugin to monitor memory size of -# each individual mongrel process -# Copyright (C) 2007 Ben VandenBos and Avvo, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Author: Ben VandenBos -# Contributors: Adam Jacob () -# Ryan Woodrum -# Matthias Marschall (mm@agileweboperations.com) -# + +=begin + +mongrel_memory - A munin plugin for OpenSolaris to monitor memory size of + each individual mongrel process +Copyright (C) 2009 Matthias Marschall - mm@agileweboperations.com + +Based on: +mongrel_process_memory - A munin plugin to monitor memory size of + each individual mongrel process +Copyright (C) 2007 Ben VandenBos and Avvo, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Author: Ben VandenBos +Contributors: Adam Jacob () + Ryan Woodrum + Matthias Marschall (mm@agileweboperations.com) + #%# family=auto #%# capabilities=autoconf +=end + module Munin class MongrelProcessMemory - def run - pid_port_map = get_pids() - port_list = Hash.new + pid_port_map = get_pids + port_list = {} pid_port_map.sort.each do |pid, port| - rss = `pmap -x #{pid} | grep total`.split(" ")[3] + rss = `pmap -x #{pid} | grep total`.split(' ')[3] puts "mongrel_#{port}.value #{rss}" end end - + def get_pids - h = Hash.new + h = {} pids = [] pids += `pgrep mongrel_rails`.split("\n") pids += `pgrep ruby`.split("\n") - pids.each { |pid| + pids.each do |pid| l = `pargs -l #{pid}` - l =~ /-p (\d+)/ - h[pid] = $1 if $1 - } + l =~ /-p (\d+)/ + h[pid] = Regexp.last_match(1) if Regexp.last_match(1) + end h end def autoconf - get_pids().length > 0 + get_pids.length > 0 end - end end - + mpm = Munin::MongrelProcessMemory.new case ARGV[0] -when "config" - puts "graph_title Mongrel Memory" - puts "graph_vlabel RSS" - puts "graph_category Memory" - puts "graph_args --base 1024 -l 0" - puts "graph_scale yes" - puts "graph_info Tracks the size of individual mongrel processes" +when 'config' + puts 'graph_title Mongrel Memory' + puts 'graph_vlabel RSS' + puts 'graph_category memory' + puts 'graph_args --base 1024 -l 0' + puts 'graph_scale yes' + puts 'graph_info Tracks the size of individual mongrel processes' mpm.get_pids.values.sort.each do |port| puts "mongrel_#{port}.label mongrel_#{port}" puts "mongrel_#{port}.info Process memory" puts "mongrel_#{port}.type GAUGE" puts "mongrel_#{port}.min 0" end -when "autoconf" +when 'autoconf' if mpm.autoconf - puts "yes" + puts 'yes' exit 0 end - puts "no" - exit 1 + puts 'no' + exit 0 else mpm.run end diff --git a/plugins/http/mongrel_process_memory b/plugins/http/mongrel_process_memory index 5966655d..89030874 100755 --- a/plugins/http/mongrel_process_memory +++ b/plugins/http/mongrel_process_memory @@ -1,100 +1,100 @@ #!/usr/bin/env ruby -# -# mongrel_process_memory - A munin plugin to monitor memory size of -# each individual mongrel process -# Copyright (C) 2007 Ben VandenBos and Avvo, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Author: Ben VandenBos -# Contributors: Adam Jacob () -# Ryan Woodrum -# + +=begin + +mongrel_process_memory - A munin plugin to monitor memory size of + each individual mongrel process +Copyright (C) 2007 Ben VandenBos and Avvo, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Author: Ben VandenBos +Contributors: Adam Jacob () + Ryan Woodrum + #%# family=auto #%# capabilities=autoconf +=end + module Munin class MongrelProcessMemory - def run - h = get_pids() - ps_output = "" - #I have no doubt that this is a terrible way of doing this. - h.each do |k, v| - ps_output = ps_output + `ps --no-heading l #{k}` + h = get_pids + ps_output = '' + # I have no doubt that this is a terrible way of doing this. + h.each do |k, _v| + ps_output += `ps --no-heading l #{k}` end if ps_output - port_list = Hash.new + port_list = {} ps_output.each_line do |l| - if l =~ /-p (\d+)/ - port = $1 - l_ary = l.split(/\s+/) - if l_ary.length > 6 - port_list[port] = l_ary[7].to_i * 1024 - end - end + next unless l =~ /-p (\d+)/ + + port = Regexp.last_match(1) + l_ary = l.split(/\s+/) + port_list[port] = l_ary[7].to_i * 1024 if l_ary.length > 6 end port_list.sort.each do |port| puts "mongrel_#{port[0]}.value #{port[1]}" end - - end + + end end - + def get_pids - h = Hash.new + h = {} pids = [] pids = `pgrep mongrel_rails` - pids.each { |p| + pids.each do |p| l = `ps #{p}` - l =~ /-p (\d+)/ - h[p] = $1 - } + l =~ /-p (\d+)/ + h[p] = Regexp.last_match(1) + end h end def autoconf pids.length > 0 end - end end - + mpm = Munin::MongrelProcessMemory.new case ARGV[0] -when "config" - puts "graph_title Mongrel Memory" - puts "graph_vlabel RSS" - puts "graph_category Memory" - puts "graph_args --base 1024 -l 0" - puts "graph_scale yes" - puts "graph_info Tracks the size of individual mongrel processes" +when 'config' + puts 'graph_title Mongrel Memory' + puts 'graph_vlabel RSS' + puts 'graph_category memory' + puts 'graph_args --base 1024 -l 0' + puts 'graph_scale yes' + puts 'graph_info Tracks the size of individual mongrel processes' mpm.get_pids.values.sort.each do |port| puts "mongrel_#{port}.label mongrel_#{port}" puts "mongrel_#{port}.info Process memory" puts "mongrel_#{port}.type GAUGE" puts "mongrel_#{port}.min 0" end -when "autoconf" +when 'autoconf' if mpm.autoconf - puts "yes" + puts 'yes' exit 0 end - puts "no" - exit 1 + puts 'no' + exit 0 else mpm.run end diff --git a/plugins/http/multi_http_responsetime b/plugins/http/multi_http_responsetime index 94086e05..1596b197 100755 
--- a/plugins/http/multi_http_responsetime +++ b/plugins/http/multi_http_responsetime @@ -1,7 +1,7 @@ #! /usr/bin/perl # This plugin based on http_responestime designed by Anders Nordby # -# It is written to control the quality of an internet conneting by +# It is written to control the quality of an internet conneting by # downloading a favicon.ico file from a lot - unlimited - count of # domains. # @@ -100,11 +100,11 @@ if ($ARGV[0] && $ARGV[0] eq "autoconf") { $vhost =~ s@^\w+://(.+?)/.*@\1@; $proto =~ s@^(\w+)://.*@\1@; -# If url_array[] is a domain, vhost will be contain the the strinf "http://" +# If url_array[] is a domain, vhost will be contain the the strinf "http://" if($vhost =~ /http/) { print "timespent$i.label $vhost\n"; } else { - print "timespent$i.label $proto://$vhost\n"; + print "timespent$i.label $proto://$vhost\n"; } print "timespent$i.info Ladezeit von $url_array[$i]/favicon.ico\n"; diff --git a/plugins/http/wget_page b/plugins/http/wget_page index 00081281..b9ddb268 100755 --- a/plugins/http/wget_page +++ b/plugins/http/wget_page @@ -97,30 +97,20 @@ default_timeout=20 default_join_lines=true if [ "${1}" = "autoconf" ]; then - result=0 - if [ -z "${wget_bin}" -o ! -f "${wget_bin}" -o ! -x "${wget_bin}" ]; then - result=1 + if [ -z "$wget_bin" ] || [ ! -f "$wget_bin" ] || [ ! -x "$wget_bin" ]; then + echo "no (missing 'wget' executable)" + elif [ -z "$time_bin" ] || [ ! -f "$time_bin" ] || [ ! -x "$time_bin" ]; then + echo "no (missing 'time' executable)" + elif [ -z "$mktemp_bin" ] || [ ! -f "$mktemp_bin" ] || [ ! -x "$mktemp_bin" ]; then + echo "no (missing 'mktemp' executable)" + elif [ -z "$grep_bin" ] || [ ! -f "$grep_bin" ] || [ ! -x "$grep_bin" ]; then + echo "no (missing 'grep' executable)" + elif [ -z "$tail_bin" ] || [ ! -f "$tail_bin" ] || [ ! -x "$tail_bin" ]; then + echo "no (missing 'tail' executable)" else - if [ -z "${time_bin}" -o ! -f "${time_bin}" -o ! -x "${time_bin}" ]; then - result=2 - else - if [ -z "${mktemp_bin}" -o ! -f "${mktemp_bin}" -o ! -x "${mktemp_bin}" ]; then - result=3 - else - if [ -z "${grep_bin}" -o ! -f "${grep_bin}" -o ! -x "${grep_bin}" ]; then - result=4 - else - [ -z "${tail_bin}" -o ! -f "${tail_bin}" -o ! 
-x "${tail_bin}" ] && result=5 - fi - fi - fi - fi - if [ ${result} -eq 0 ]; then echo "yes" - else - echo "no" fi - exit $result + exit 0 fi if [ -z "${names}" ]; then @@ -200,7 +190,7 @@ for name in ${names}; do [ -z "${iregex_header}" -a -z "${iregex_body}" ] && break if [ ${K} -eq 1 ]; then OIFS="${IFS}" - # we skip carrige return characters from the end of header lines + # we skip carriage return characters from the end of header lines IFS=$(echo -en "\r") inheader=0 # The "read" command reads only lines terminated by a specific diff --git a/plugins/icecast/icecast2 b/plugins/icecast/icecast2 index 07de2966..de3742aa 100755 --- a/plugins/icecast/icecast2 +++ b/plugins/icecast/icecast2 @@ -1,21 +1,21 @@ #!/bin/sh -# +# # Plugin to monitor icecast2 streams / listeners -# +# # Contributed by drew Roberts # # based on the postfix_mailqueue plugin as per below -# +# # Plugin to monitor postfix mail spools -# +# # Contributed by Nicolai Langfeldt -# +# # $Log$ # Revision 1.0 2008/07/04 16:02:36 zotz # Initial work # -# +# #%# family=auto #%# capabilities=autoconf @@ -26,11 +26,11 @@ case $1 in autoconf|detect) if [ -d $ICEDIR/ ] ; then echo yes - exit 0 else echo "no (icedir not found)" - exit 1 - fi;; + fi + exit 0 + ;; config) cat <<'EOF' graph_title Icecast2 Stream Listeners diff --git a/plugins/icecast/icecast2_ b/plugins/icecast/icecast2_ old mode 100644 new mode 100755 index 4b0eac8e..754c8122 --- a/plugins/icecast/icecast2_ +++ b/plugins/icecast/icecast2_ @@ -1,6 +1,12 @@ #! /usr/bin/python # -*- coding: iso-8859-1 -*- +import os +import sys +import urllib2 +from xml.dom import minidom + + # Hostname of Icecast server # Just canonical name, no http:// nor ending / host = "foo.bar.com" @@ -21,14 +27,13 @@ oggbitrates = [56, 128, 172] # well the total number of listeners for any configured stream. # For each stream with multiple bitrates, create one # icecast2_streamname -# If the name contains a "-" exchange it with a "_", and the script will change it back for you. This is to satisfy internal requirements of Munin. +# If the name contains a "-" exchange it with a "_", and the script will change it back for you. +# This is to satisfy internal requirements of Munin. 
# For each streamname, the plugin will check for the configured bitrates # Expecting the mountpoints to be on the form of # /streamname_ for mp3 # /streamname_.ogg for Ogg/Vorbis -import urllib2, os.path, time, sys -from xml.dom import minidom def hent_XML(): auth_handler = urllib2.HTTPBasicAuthHandler() @@ -45,109 +50,111 @@ def hent_XML(): xmldoc = minidom.parseString(xml) xmldoc = xmldoc.firstChild - #Totalt antall lyttere + # Totalt antall lyttere total_lyttere = xmldoc.getElementsByTagName("clients")[0].firstChild.nodeValue - #Totalt antall kilder + # Totalt antall kilder total_kilder = xmldoc.getElementsByTagName("sources")[0].firstChild.nodeValue - #Status for enkelt strøm + # Status for enkelt strøm sources = xmldoc.getElementsByTagName("source") sourcelist = {} for source in sources: mount = source.getAttribute("mount") listeners = source.getElementsByTagName("listeners")[0].firstChild.nodeValue name = source.getElementsByTagName("server_name")[0].firstChild.nodeValue - mount = mount.replace("-", "_") + mount = mount.replace("-", "_") sourcelist[mount[1:]] = (listeners, name) sourcename = sys.argv[0].split("/")[-1][len("icecast2_"):] if len(sys.argv) == 1: - sys.argv.append("") + sys.argv.append("") if sys.argv[1] == "autoconf": - print "yes" + print "yes" elif sys.argv[1] == "config": - if sourcename == "total": - print "graph_title Totalt antall lyttere" + if sourcename == "total": + print "graph_title Totalt antall lyttere" print "graph_vlabel lyttere" - print "graph_category streaming" - print "totallyttere.label Totalt antall lyttere" - print "totalkilder.label Totalt antall kilder" - chanlist = {} - for a, b, filelist in os.walk("/etc/munin/plugins"): - for file in filelist: - if file.find("icecast2_") != -1: - channelname = file[len("icecast2_"):] - if channelname != "total" and chanlist.has_key(channelname) != 1: - chanlist[channelname] = 0 - chanlist = chanlist.keys() - chanlist.sort() - for chan in chanlist: - graphtitle = "" - for key in sourcelist.keys(): - if key.find(chan) != -1: - l, graphtitle = sourcelist[key] - break - if graphtitle == "": - graphtitle = chan - print "%s.label %s" % (chan, graphtitle) + print "graph_category streaming" + print "totallyttere.label Totalt antall lyttere" + print "totalkilder.label Totalt antall kilder" + chanlist = {} + for a, b, filelist in os.walk("/etc/munin/plugins"): + for file in filelist: + if file.find("icecast2_") != -1: + channelname = file[len("icecast2_"):] + if channelname != "total" and channelname not in chanlist: + chanlist[channelname] = 0 + chanlist = chanlist.keys() + chanlist.sort() + for chan in chanlist: + graphtitle = "" + for key in sourcelist.keys(): + if key.find(chan) != -1: + l, graphtitle = sourcelist[key] + break + if graphtitle == "": + graphtitle = chan + print "%s.label %s" % (chan, graphtitle) - else: - sumstring = "" - graphtitle = "" - for key in sourcelist.keys(): - if key.find(sourcename) != -1: - l, graphtitle = sourcelist[key] - break - if graphtitle == "": - graphtitle = sourcename - print "graph_title %s" % graphtitle - print "graph_vlabel lyttere" - print "graph_category streaming" - for bitrate in mp3bitrates: - print "%s_%s.label %s-%s" % (sourcename, bitrate, "/" + sourcename.replace("_", "-"), bitrate) - sumstring += "%s_%s " % (sourcename, bitrate) - print "%s_%s.critical -0.5:" % (sourcename, bitrate) - for bitrate in oggbitrates: - print "%s_%s_ogg.label %s-%s.ogg" % (sourcename, bitrate, "/" + sourcename.replace("_", "-"), bitrate) - print "%s_%s_ogg.critical -0.5:" % (sourcename, 
bitrate) - sumstring += "%s_%s_ogg " % (sourcename, bitrate) - print "%slyttere.label Totalt antall lyttere" % sourcename + else: + sumstring = "" + graphtitle = "" + for key in sourcelist.keys(): + if key.find(sourcename) != -1: + l, graphtitle = sourcelist[key] + break + if graphtitle == "": + graphtitle = sourcename + print "graph_title %s" % graphtitle + print "graph_vlabel lyttere" + print "graph_category streaming" + for bitrate in mp3bitrates: + print "%s_%s.label %s-%s" % (sourcename, bitrate, + "/" + sourcename.replace("_", "-"), bitrate) + sumstring += "%s_%s " % (sourcename, bitrate) + print "%s_%s.critical -0.5:" % (sourcename, bitrate) + for bitrate in oggbitrates: + print "%s_%s_ogg.label %s-%s.ogg" % (sourcename, bitrate, + "/" + sourcename.replace("_", "-"), bitrate) + print "%s_%s_ogg.critical -0.5:" % (sourcename, bitrate) + sumstring += "%s_%s_ogg " % (sourcename, bitrate) + print "%slyttere.label Totalt antall lyttere" % sourcename print "%slyttere.sum %s" % (sourcename, sumstring) elif sys.argv[1] != "config": - if sourcename == "total": - print "totallyttere.value %s" % total_lyttere - print "totalkilder.value %s" % total_kilder - statslist = {} - for a, b, filelist in os.walk("/etc/munin/plugins"): - for file in filelist: - if file.find("icecast2_") != -1: - channelname = file[len("icecast2_"):] - if channelname != "total" and statslist.has_key(channelname) != 1: - statslist[channelname] = 0 + if sourcename == "total": + print "totallyttere.value %s" % total_lyttere + print "totalkilder.value %s" % total_kilder + statslist = {} + for a, b, filelist in os.walk("/etc/munin/plugins"): + for file in filelist: + if file.find("icecast2_") != -1: + channelname = file[len("icecast2_"):] + if channelname != "total" and channelname not in statslist: + statslist[channelname] = 0 - for source in sourcelist: - listeners, name = sourcelist[source] - if not statslist.has_key(source[:source.rfind("_")]): - statslist[source[:source.rfind("_")]] = 0 - statslist[source[:source.rfind("_")]] += int(listeners) - for stat in statslist: - print "%s.value %s" % (stat, statslist[stat]) - else: - for bitrate in mp3bitrates: - if sourcelist.has_key("%s_%s" % (sourcename, bitrate)): - listeners = sourcelist["%s_%s" % (sourcename, bitrate)][0] - print listeners - else: - listeners = -1 - print "%s_%s.value %s" % (sourcename, bitrate, listeners) - for bitrate in oggbitrates: - if sourcelist.has_key("%s_%s.ogg" % (sourcename, bitrate)): - listeners = sourcelist["%s_%s.ogg" % (sourcename, bitrate)][0] - else: - listeners = -1 - print "%s_%s_ogg.value %s" % (sourcename, bitrate, listeners) + for source in sourcelist: + listeners, name = sourcelist[source] + if source[:source.rfind("_")] not in statslist: + statslist[source[:source.rfind("_")]] = 0 + statslist[source[:source.rfind("_")]] += int(listeners) + for stat in statslist: + print "%s.value %s" % (stat, statslist[stat]) + else: + for bitrate in mp3bitrates: + if ("%s_%s" % (sourcename, bitrate)) in sourcelist: + listeners = sourcelist["%s_%s" % (sourcename, bitrate)][0] + print listeners + else: + listeners = -1 + print "%s_%s.value %s" % (sourcename, bitrate, listeners) + for bitrate in oggbitrates: + if ("%s_%s.ogg" % (sourcename, bitrate)) in sourcelist: + listeners = sourcelist["%s_%s.ogg" % (sourcename, bitrate)][0] + else: + listeners = -1 + print "%s_%s_ogg.value %s" % (sourcename, bitrate, listeners) else: print sys.argv[1] - + if __name__ == "__main__": hent_XML() diff --git a/plugins/icecast/icecast2_all b/plugins/icecast/icecast2_all 
old mode 100644 new mode 100755 index 793403e8..d9745e8b --- a/plugins/icecast/icecast2_all +++ b/plugins/icecast/icecast2_all @@ -1,5 +1,15 @@ #! /usr/bin/python # -*- coding: iso-8859-1 -*- +""" +This plugin shows the statistics of every source currently connected to the Icecast2 server. +See the Icecast2_ plugin for specific mountpoint plugin. +""" + + +import sys +import urllib2 +from xml.dom import minidom + # Hostname of Icecast server # Just canonical name, no http:// nor ending / @@ -9,10 +19,6 @@ username = "admin" password = "" realm = "Icecast2 Server" -# This plugin shows the statistics of every source currently connected to the Icecast2 server. See the Icecast2_ plugin for specific mountpoint plugin. - -import urllib2, os.path, time, sys -from xml.dom import minidom def hent_XML(): auth_handler = urllib2.HTTPBasicAuthHandler() @@ -29,44 +35,44 @@ def hent_XML(): xmldoc = minidom.parseString(xml) xmldoc = xmldoc.firstChild - #Totalt antall lyttere + # Totalt antall lyttere total_lyttere = xmldoc.getElementsByTagName("clients")[0].firstChild.nodeValue - #Totalt antall kilder + # Totalt antall kilder total_kilder = xmldoc.getElementsByTagName("sources")[0].firstChild.nodeValue - #Status for enkelt strøm + # Status for enkelt strøm sources = xmldoc.getElementsByTagName("source") sourcelist = {} for source in sources: mount = source.getAttribute("mount") listeners = source.getElementsByTagName("listeners")[0].firstChild.nodeValue name = source.getElementsByTagName("server_name")[0].firstChild.nodeValue - mount = mount.replace("-", "_").replace(".", "_") + mount = mount.replace("-", "_").replace(".", "_") sourcelist[mount[1:]] = (listeners, name) if len(sys.argv) > 0 and sys.argv[1] == "autoconf": - print "yes" + print "yes" elif len(sys.argv) == 1 or sys.argv[1] != "config": print "totallyttere.value %s" % total_lyttere print "totalkilder.value %s" % total_kilder - sourcesort = sourcelist.keys() - sourcesort.sort() + sourcesort = sourcelist.keys() + sourcesort.sort() for source in sourcesort: listeners, name = sourcelist[source] print "%s.value %s" % (source, listeners) elif sys.argv[1] == "config": print "graph_title Total number of listeners" print "graph_vlabel listeners" - print "graph_category streaming" + print "graph_category streaming" print "totallyttere.label Total number of listeners" print "totalkilder.label Totalt number of sources" - sourcesort = sourcelist.keys() - sourcesort.sort() - for source in sourcesort: - listeners, name = sourcelist[source] - print "%s.label %s" % (source, "/" + source) + sourcesort = sourcelist.keys() + sourcesort.sort() + for source in sourcesort: + listeners, name = sourcelist[source] + print "%s.label %s" % (source, "/" + source) else: print sys.argv[1] - + if __name__ == "__main__": hent_XML() diff --git a/plugins/icecast/icecast2_simple b/plugins/icecast/icecast2_simple index c69ee2a2..86c2b38b 100755 --- a/plugins/icecast/icecast2_simple +++ b/plugins/icecast/icecast2_simple @@ -1,7 +1,7 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby # # Plugin author: Gunnar Wolf -# +# # You are hereby granted authorization to copy, use, modify, distribute, # and in general do anything you please with this plugin. It is too simple # even to GPL-protect it. 
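Both icecast2_all above and the per-mountpoint icecast2_ plugin read the same Icecast2 admin statistics document (/admin/stats), so a quick way to review what they parse is to fetch that document by hand. A minimal sketch, assuming a local Icecast2 server on port 8000 and the admin/hackme defaults used by icecast2_simple below (host, port and password are placeholders to adjust):

    # Fetch the statistics XML the icecast2_* plugins parse (HTTP basic auth).
    curl -s -u admin:hackme http://127.0.0.1:8000/admin/stats > /tmp/icecast-stats.xml
    # Rough per-mountpoint source count; assumes Icecast's usual one-element-per-line output.
    grep -c '<source mount=' /tmp/icecast-stats.xml

The totallyttere and totalkilder fields reported by icecast2_all correspond to the <clients> and <sources> elements of this document.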
@@ -18,23 +18,23 @@ require 'open-uri' def get_conf # Default values - conf = {:host => '127.0.0.1', :port => 8000, - :username => 'admin', :password => 'hackme' } + conf = { host: '127.0.0.1', port: 8000, + username: 'admin', password: 'hackme' } conf.keys.each do |key| - env_key = sprintf('icecast_%s', key) + env_key = format('icecast_%s', key) conf[key] = ENV[env_key] if ENV.has_key?(env_key) end conf end def get_data(conf) - begin - data = Hpricot(open(sprintf('http://%s:%s/admin/stats', - conf[:host], conf[:port]), - :http_basic_authentication=>[conf[:username], - conf[:password]])) + begin + data = Hpricot(open(format('http://%s:%s/admin/stats', + conf[:host], conf[:port]), + http_basic_authentication: [conf[:username], + conf[:password]])) rescue OpenURI::HTTPError - puts "Cannot connect: HTTP connection error" + puts 'Cannot connect: HTTP connection error' exit 1 end data @@ -42,13 +42,13 @@ end def get_values(data) vals = {} - [:sources, :clients].each do |key| - elem = data/key - if elem.nil? - vals[key] = 0 - else - vals[key] = elem.innerHTML - end + %i[sources clients].each do |key| + elem = data / key + vals[key] = if elem.nil? + 0 + else + elem.innerHTML + end end vals end @@ -56,15 +56,16 @@ end data = get_data(get_conf) vals = get_values(data) -if ARGV[0] == 'autoconf' +case ARGV[0] +when 'autoconf' puts 'yes' -elsif ARGV[0] == 'config' - puts "graph_title Total sources and clients for Icecast" - puts "graph_vlabel listeners" - puts "graph_category streaming" - puts "sources.label Total number of sources" - puts "clients.label Total number of clients" +when 'config' + puts 'graph_title Total sources and clients for Icecast' + puts 'graph_vlabel listeners' + puts 'graph_category streaming' + puts 'sources.label Total number of sources' + puts 'clients.label Total number of clients' else - puts "sources.value " + vals[:sources] - puts "clients.value " + vals[:clients] + puts 'sources.value ' + vals[:sources] + puts 'clients.value ' + vals[:clients] end diff --git a/plugins/icecast/icecast_ b/plugins/icecast/icecast_ index 80fbbfb4..0aca7253 100755 --- a/plugins/icecast/icecast_ +++ b/plugins/icecast/icecast_ @@ -50,8 +50,8 @@ pw = "yourpassword" #exclude = ("123.123.123.123",) exclude = () -# Exclude these sources from calculation. This is useful to excluse special sources like -# fallback sources which doesn't expose the same informations and then break this script +# Exclude these sources from calculation. This is useful to excluse special sources like +# fallback sources which doesn't expose the same information and then break this script # Ever add fallback sources to this list #source_exclude = ["/fallback.mp3", "/fallback.ogg"] source_exclude = ("/fallback.mp3", "/fallback.ogg") @@ -141,7 +141,7 @@ except IndexError: if plugin_name == "icecast_uptime": for s in sources: print "%s.value %s" % (s["mount"].strip("/").replace(".","_").replace("-","_"), int(s["connected"]) / 3600.) 
- + elif plugin_name == "icecast_traffic": f = opener.open("http://%s/admin/stats.xml" % server) tree = ElementTree() diff --git a/plugins/icinga/icinga_checks b/plugins/icinga/icinga_checks new file mode 100755 index 00000000..9e835b09 --- /dev/null +++ b/plugins/icinga/icinga_checks @@ -0,0 +1,112 @@ +#!/bin/sh + +: << =cut +=head1 NAME + +icinga_checks - Plugin to monitor results of icinga monitoring + +=head1 CONFIGURATION + +No configuration + +=head1 AUTHOR + +mafri with help by sumpfralle and ndo84bw + +=head1 LICENSE + +GPLv3 + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + +ICINGACLI=${ICINGACLI:-$(command -v icingacli)} +JQ=${JQ:-$(command -v jq)} + + +if [ "$1" = "autoconf" ] ; then + if [ ! -x "$ICINGACLI" ]; then + echo "no (could not find 'icingacli')" + elif [ ! -x "$JQ" ]; then + echo "no (could not find 'jq')" + else + echo "yes" + fi + exit +fi + +set -e + +if [ "$1" = "config" ]; then + + echo "multigraph icinga_host_checks" + echo "graph_title Icinga Host Checks" + echo 'graph_args --base 1000' + echo 'graph_vlabel Count' + echo 'graph_category icinga' + echo "up.label Up" + echo "down.label Down" + echo "unreachable.label Unreachable" + echo "pending.label Pending" + echo "up.draw AREA" + echo "down.draw STACK" + echo "unreachable.draw STACK" + echo "pending.draw STACK" + + + echo "multigraph icinga_service_checks" + echo "graph_title Icinga Service Checks" + echo 'graph_args --base 1000' + echo 'graph_vlabel Count' + echo 'graph_category icinga' + echo "ok.label Ok" + echo "warning.label Warning" + echo "critical.label Critical" + echo "unknown.label Unknown" + echo "pending.label Pending" + echo "ok.draw AREA" + echo "warning.draw STACK" + echo "critical.draw STACK" + echo "unknown.draw STACK" + echo "pending.draw STACK" + + exit +fi + +if [ ! -x "$ICINGACLI" ]; then + echo "could not find 'icingacli'" >&2 + exit 1 +elif [ ! 
-x "$JQ" ]; then + echo "could not find 'jq'" >&2 + exit 1 +fi + +output=$("$ICINGACLI" monitoring list hosts --format=json) +host_up=$( echo "$output" | "$JQ" -r '.[] | select(.host_state == 0) | .host_name' | wc -l ) +host_down=$( echo "$output" | "$JQ" -r '.[] | select(.host_state == 1) | .host_name' | wc -l ) +host_pend=$( echo "$output" | "$JQ" -r '.[] | select(.host_state == 2) | .host_name' | wc -l ) +host_unre=$( echo "$output" | "$JQ" -r '.[] | select(.host_state == 3) | .host_name' | wc -l ) + +echo "multigraph icinga_host_checks" +echo "up.value $host_up" +echo "down.value $host_down" +echo "pending.value $host_pend" +echo "unreachable.value $host_unre" + +output=$("$ICINGACLI" monitoring list services --format=json) +service_ok=$( echo "$output" | "$JQ" -r '.[] | select(.service_state == 0) | .host_name + .service_name' | wc -l ) +service_warn=$(echo "$output" | "$JQ" -r '.[] | select(.service_state == 1) | .host_name + .service_name' | wc -l ) +service_crit=$(echo "$output" | "$JQ" -r '.[] | select(.service_state == 2) | .host_name + .service_name' | wc -l ) +service_pend=$(echo "$output" | "$JQ" -r '.[] | select(.service_state == 3) | .host_name + .service_name' | wc -l ) +service_unkn=$(echo "$output" | "$JQ" -r '.[] | select(.service_state == 4) | .host_name + .service_name' | wc -l ) + +echo "multigraph icinga_service_checks" +echo "ok.value $service_ok" +echo "warning.value $service_warn" +echo "critical.value $service_crit" +echo "unknown.value $service_unkn" +echo "pending.value $service_pend" diff --git a/plugins/ilias/example-graphs/ilias_session-day.png b/plugins/ilias/example-graphs/ilias_session-day.png new file mode 100644 index 00000000..130604cf Binary files /dev/null and b/plugins/ilias/example-graphs/ilias_session-day.png differ diff --git a/plugins/ilias/ilias_session b/plugins/ilias/ilias_session new file mode 100755 index 00000000..c04bccc5 --- /dev/null +++ b/plugins/ilias/ilias_session @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# Munin plugin for ILIAS + +: << =cut + +=head1 NAME + +ilias_session - Munin plugin to monitor L open source +learning management system's sessions + +=head1 DESCRIPTION + +Reads session and user statistcs from any ILIAS MySQL/MariaDB database. + +https://ilias.de/ | http://gallery.munin-monitoring.org/contrib/cms-index.html + +Requirements: + +bash version 4 is required for associative array support. +This plugin requires mysql CLI or a compatible client being available. + +In order to get precise results, please ensure your MySQL server has the same +time as your ILIAS application server. Timezone does not matter. + +=head1 CONFIGURATION + +The plugin needs the following configuration settings e.g. in +/etc/munin/plugin-conf.d/ilias.conf + + [ilias_session] + env.ildbuser ilias + env.ildbpassword youriliaspasword + env.ildb ilias + env.ildbhost localhost + env.ildbport 3306 + +WARNING: Setting env.ildbpassword will possibly expose the database password +to other processes and might be insecure. + +=head1 AUTHOR + +Copyright 2018 L + (L) + +=head1 LICENSE + +Licensed under the MIT license: +https://opensource.org/licenses/MIT + +=head1 CONTRIBUTE + +Find this plugin on L + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + + 2.0 + +=head1 CHANGELOG + +=head2 2.0 - 2018/04/20 + + first sh release + +=head2 1.0 - 2018/03/19 + + first release + +=cut + +# Include plugin.sh +# shellcheck source=/dev/null +. 
"${MUNIN_LIBDIR:-}/plugins/plugin.sh" + +# Shell options +set -o nounset # Like perl use strict; + +# Graph settings +global_attr=" + graph_title ILIAS session and logins + graph_category cms + graph_args --lower-limit 0 + graph_vlabel occurrences + graph_info Number of active ILIAS user sessions and logins +" + +declare -A d_attr=( \ + [0,field]=iltotal1day \ + [0,type]=GAUGE \ + [0,draw]=LINE \ + [0,label]='users logged in within day' \ + [0,sql]="SELECT COUNT( usr_id ) AS C + FROM \`usr_data\` + WHERE last_login >= DATE_SUB( NOW( ) , INTERVAL 1 DAY ) + " \ + [1,field]=ilsessions \ + [1,type]=GAUGE \ + [1,draw]=LINE \ + [1,label]='active sessions' \ + [1,sql]="SELECT COUNT( user_id ) AS C + FROM usr_session + WHERE \`expires\` > UNIX_TIMESTAMP( NOW( ) ) AND user_id != 0 + " \ + [2,field]=il60minavg \ + [2,type]=GAUGE \ + [2,draw]=LINE \ + [2,label]='sessions created/updated within 1h' \ + [2,sql]="SELECT COUNT( user_id ) AS C + FROM usr_session + WHERE 60 * 60 > UNIX_TIMESTAMP( NOW( ) ) - ctime AND user_id != 0 + " \ + [3,field]=il5minavg \ + [3,type]=GAUGE \ + [3,draw]=LINE \ + [3,label]='sessions created/updated within 5min' \ + [3,sql]="SELECT COUNT( user_id ) AS C + FROM usr_session + WHERE 5 * 60 > UNIX_TIMESTAMP( NOW( ) ) - ctime AND user_id != 0 + " \ +) + +# Read the environment and apply defaults +DB_CLI_TOOL="${ildbcli:-mysql}" +DB_CLI_CMD="$(command -v "${DB_CLI_TOOL}")" +DB_HOST="${ildbhost:-localhost}" +DB_PORT="${ildbport:-3306}" +DB="${ildb:-ilias}" +DB_USER="${ildbuser:-root}" +DB_PASSWORD="${ildbpassword:-}" + +# Functions + +autoconf() { + if command -v "${DB_CLI_TOOL}" >/dev/null ; then + echo yes + else + echo "no (failed to find executable '${DB_CLI_TOOL}')" + fi +} + +config() { + local label_max_length=45 + local i=0 + + # print global attributes + echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d' + + i=0 + # -v varname + # True if the shell variable varname is set (has been assigned a value). + # https://stackoverflow.com/a/45385463/2683737 + # shellcheck disable=SC2102 + while [[ -v d_attr[$i,field] ]]; do + field=${d_attr[$i,field]} + echo "$field.type ${d_attr[$i,type]}" + echo "$field.draw ${d_attr[$i,draw]}" + echo "$field.label ${d_attr[$i,label]:0:${label_max_length}}" + echo "$field.min 0" + ((++i)) + done +} + +# Join a bash array $1 is the glue +join_by() { + local d=$1 + shift + echo -n "$1" + shift + printf "%s" "${@/#/$d}" +} + +fetch() { + local i=0 + local query=() + local query_string="" + declare -a results + + # create an array of queries + i=0 + # shellcheck disable=SC2102 + while [[ -v d_attr[$i,field] ]]; do + query+=("${d_attr[$i,sql]}") + ((++i)) + done + + # build query by joining the array elements + query_string=$(join_by " UNION ALL " "${query[@]}") + + # obtain result using CLI call; don't supply password through + # command line; note that MySQL considers it insecure using + # an environment variable: + # >This method of specifying your MySQL password must + # >be considered extremely insecure and should not be used. + # >Some versions of ps include an option to display the + # >environment of running processes. [...] 
+ result=$(MYSQL_PWD="$DB_PASSWORD" \ + "$DB_CLI_CMD" \ + --skip-column-names \ + -h "$DB_HOST" \ + -u "$DB_USER" \ + -P "$DB_PORT" \ + "$DB" \ + -e "$query_string" ) + + # initialize array + mapfile -t results <<< "$result" + + # extract result and echo it to stdout, which is + # captured by Munin + i=0 + # shellcheck disable=SC2102 + while [[ -v d_attr[$i,field] ]]; do + echo "${d_attr[$i,field]}.value ${results[$i]}" + ((++i)) + done +} + + +# Main +case ${1:-} in +autoconf) + autoconf + ;; +config) + config + [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + ;; +*) + fetch + ;; +esac + +exit 0 + diff --git a/plugins/imapproxy/imapproxy_multi b/plugins/imapproxy/imapproxy_multi index 12afc642..16fc5dae 100755 --- a/plugins/imapproxy/imapproxy_multi +++ b/plugins/imapproxy/imapproxy_multi @@ -1,7 +1,7 @@ #!/usr/bin/env python """=cut -=head1 NAME +=head1 NAME imapproxy - Munin multigraph plugin to monitor imapproxy using pimpstat @@ -55,7 +55,7 @@ def print_config(): print "cache_misses.type DERIVE" print "cache_misses.label Cache Misses" print "cache_misses.min 0" - print + print print "multigraph imapproxy_connections" print "graph_title Connection Statistics For ImapProxy" print "graph_args -l 0 --base 1000" @@ -79,7 +79,7 @@ def print_fetch(): connections_created = 0 connections_reused = 0 connections = Popen( - "pimpstat -c | egrep '(Total (Reused|Created)|Cache (Hits|Misses))'", + "pimpstat -c | egrep '(Total (Reused|Created)|Cache (Hits|Misses))'", shell=True, stdout=PIPE ) diff --git a/plugins/ip6/ip6_ b/plugins/ip6/ip6_ index 099de58c..5c7114ff 100755 --- a/plugins/ip6/ip6_ +++ b/plugins/ip6/ip6_ @@ -1,6 +1,6 @@ #!/bin/sh # -# Patched version of ip_ plugin to support IPv6 and ip6tables. +# Patched version of ip_ plugin to support IPv6 and ip6tables. # Most of plugin done by munin core developers. Modified for IPv6 support # by Lasse Karstensen February 2009. # @@ -28,22 +28,20 @@ IP=`basename $0 | sed 's/^ip6_//g' | tr '_' ':' ` if [ "$1" = "autoconf" ]; then if [ -r /proc/net/dev ]; then - ip6tables -L INPUT -v -n -x >/dev/null 2>/dev/null + ip6tables -L INPUT -v -n -x -w >/dev/null 2>/dev/null if [ $? 
-gt 0 ]; then echo "no (could not run ip6tables as user `whoami`)" - exit 1 else echo yes - exit 0 fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then - ip6tables -L INPUT -v -n -x 2>/dev/null | awk --posix '$8 ~ /^([0-9a-f]{1,4}(\:|\:\:)){1,7}([0-9a-f]{1,4})\/([0-9]{1,3})$/ { if (done[$8]!=1) {print $8; done[$8]=1;}}'|sed "s#/[0-9]\{1,3\}##" + ip6tables -L INPUT -v -n -x -w 2>/dev/null | awk --posix '$8 ~ /^([0-9a-f]{1,4}(\:|\:\:)){1,7}([0-9a-f]{1,4})\/([0-9]{1,3})$/ { if (done[$8]!=1) {print $8; done[$8]=1;}}'|sed "s#/[0-9]\{1,3\}##" exit 0 fi @@ -60,9 +58,9 @@ if [ "$1" = "config" ]; then echo 'in.label received' echo 'in.type DERIVE' echo 'in.min 0' - echo 'in.cdef in,8,*' + echo 'in.cdef in,8,*' exit 0 fi; -ip6tables -L INPUT -v -n -x | grep -m1 $IP | awk "{ print \"in.value \" \$2 }" -ip6tables -L OUTPUT -v -n -x | grep -m1 $IP | awk "{ print \"out.value \" \$2 }" +ip6tables -L INPUT -v -n -x -w | grep -m1 $IP | awk "{ print \"in.value \" \$2 }" +ip6tables -L OUTPUT -v -n -x -w | grep -m1 $IP | awk "{ print \"out.value \" \$2 }" diff --git a/plugins/ip6/ip6t_accounting b/plugins/ip6/ip6t_accounting index 2919a9c2..aaaebe50 100755 --- a/plugins/ip6/ip6t_accounting +++ b/plugins/ip6/ip6t_accounting @@ -10,7 +10,7 @@ graph_category network graph_args -l 0 graph_info ip6tables bites ipv6 EOF -ip6tables -vxL|grep -E 'ACC|REJ'|grep -v ^Chain|sed -e 's/dpt://g' -e 's/ .*://g'|awk '{print $NF"-"$4".label", $NF" "$4"\n",$NF"-"$4".min", 0}'|sed 's/^\s*//g' +ip6tables -vx -w -L | grep -E 'ACC|REJ'|grep -v ^Chain|sed -e 's/dpt://g' -e 's/ .*://g'|awk '{print $NF"-"$4".label", $NF" "$4"\n",$NF"-"$4".min", 0}'|sed 's/^\s*//g' exit 0 fi if [ "$1" = "autoconf" ]; then @@ -18,5 +18,5 @@ if [ "$1" = "autoconf" ]; then exit 0 fi -ip6tables -vxL|grep -E 'ACC|REJ'|grep -v ^Chain|awk '{print $NF"-"$4".value", $2}'|sed 's/^dpt://' +ip6tables -vx -w -L | grep -E 'ACC|REJ'|grep -v ^Chain|awk '{print $NF"-"$4".value", $2}'|sed 's/^dpt://' diff --git a/plugins/ipvs/ipvs_active b/plugins/ipvs/ipvs_active index c6fd9255..ded74d7a 100755 --- a/plugins/ipvs/ipvs_active +++ b/plugins/ipvs/ipvs_active @@ -35,7 +35,7 @@ IPLIST=$ips if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -64,7 +64,7 @@ fi function get_ip { # Read the output ipvsadm -l -n | nl | while read line; do - # If match the ip, print the line number + # If match the ip, print the line number if ( echo $line | grep -e $IP > /dev/null ); then MAT=`echo $line | cut -d " " -f 1` echo $MAT @@ -84,7 +84,7 @@ for IP in $IPLIST; do # Parse lines while read line; do - + # Get line numbers N=`echo $line | cut -d " " -f 1` @@ -94,7 +94,7 @@ for IP in $IPLIST; do if ( echo $line | grep -e TCP -e UDP > /dev/null ); then break fi - + COUNT=`expr $COUNT + 1` fi done < $F1 diff --git a/plugins/ipvs/ipvs_bps b/plugins/ipvs/ipvs_bps index 70e045e3..c13c30d4 100755 --- a/plugins/ipvs/ipvs_bps +++ b/plugins/ipvs/ipvs_bps @@ -35,7 +35,7 @@ IPLIST=$ips if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi diff --git a/plugins/ipvs/ipvs_conn b/plugins/ipvs/ipvs_conn index fb480b92..cec4e391 100755 --- a/plugins/ipvs/ipvs_conn +++ b/plugins/ipvs/ipvs_conn @@ -35,7 +35,7 @@ IPLIST=$ips if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -67,7 +67,7 @@ fi function get_ip { # Read the output ipvsadm -l -n | nl | while read line; do - # If match the ip, print the line number + # If match the ip, print the line number if ( echo $line | grep -e $IP > /dev/null ); then MAT=`echo $line | cut -d " " 
-f 1` echo $MAT @@ -87,7 +87,7 @@ for IP in $IPLIST; do # Parse lines while read line; do - + # Get line numbers N=`echo $line | cut -d " " -f 1` @@ -97,7 +97,7 @@ for IP in $IPLIST; do if ( echo $line | grep -e TCP -e UDP > /dev/null ); then break fi - + # Get ActiveConn number NUM1=`echo $line | awk '{print $6}'` # Sum it @@ -106,7 +106,7 @@ for IP in $IPLIST; do NUM2=`echo $line | awk '{print $7}'` # Sum it INACTCONCNT=$(( INACTCONCNT + NUM2)) - + COUNT=`expr $COUNT + 1` fi done < $F1 diff --git a/plugins/ipvs/ipvs_cps b/plugins/ipvs/ipvs_cps index 41654535..502ff9f0 100755 --- a/plugins/ipvs/ipvs_cps +++ b/plugins/ipvs/ipvs_cps @@ -35,7 +35,7 @@ IPLIST=$ips if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi diff --git a/plugins/ircd/irc2 b/plugins/ircd/irc2 index 4a37c396..21361098 100755 --- a/plugins/ircd/irc2 +++ b/plugins/ircd/irc2 @@ -66,7 +66,7 @@ if($ARGV[0] and $ARGV[0] eq "config") { my %result; # We create a new PoCo-IRC object -my $irc = POE::Component::IRC->spawn( +my $irc = POE::Component::IRC->spawn( nick => $nickname, ircname => $ircname, server => $server, @@ -119,7 +119,7 @@ sub irc_001 { } -#irc_251: 'moo.us.p2p-network.net' 'There are 155 users and 3397 invisible on 16 servers' [There are 155 users and 3397 invisible on 16 servers] +#irc_251: 'moo.us.p2p-network.net' 'There are 155 users and 3397 invisible on 16 servers' [There are 155 users and 3397 invisible on 16 servers] # luserclient sub irc_251 { #print "In 251\n"; @@ -152,7 +152,7 @@ sub irc_251 { #printf "251 Got clients=%d servers=%d\n", ($result{'clients'} || -1), ($result{'servers'} || -1); } -#irc_252: 'moo.us.p2p-network.net' '18 :operator(s) online' [18, operator(s) online] +#irc_252: 'moo.us.p2p-network.net' '18 :operator(s) online' [18, operator(s) online] # opers sub irc_252 { my $sender = $_[SENDER]; @@ -165,7 +165,7 @@ sub irc_252 { #printf "254 Got channels %d\n", ($result{'channels'} || -1); } -#irc_253: 'moo.us.p2p-network.net' '1 :unknown connection(s)' [1, unknown connection(s)] +#irc_253: 'moo.us.p2p-network.net' '1 :unknown connection(s)' [1, unknown connection(s)] sub irc_253 { my $sender = $_[SENDER]; my $irc = $sender->get_heap(); @@ -177,7 +177,7 @@ sub irc_253 { #printf "254 Got channels %d\n", ($result{'channels'} || -1); } -#irc_254: 'moo.us.p2p-network.net' '1325 :channels formed' [1325, channels formed] +#irc_254: 'moo.us.p2p-network.net' '1325 :channels formed' [1325, channels formed] # luserchannels sub irc_254 { my $sender = $_[SENDER]; @@ -190,7 +190,7 @@ sub irc_254 { #printf "254 Got channels %d\n", ($result{'channels'} || -1); } -#irc_255: 'moo.us.p2p-network.net' 'I have 348 clients and 1 servers' [I have 348 clients and 1 servers] +#irc_255: 'moo.us.p2p-network.net' 'I have 348 clients and 1 servers' [I have 348 clients and 1 servers] # local clients/servers sub irc_255 { my $sender = $_[SENDER]; @@ -203,7 +203,7 @@ sub irc_255 { } } -#irc_265: 'moo.us.p2p-network.net' 'Current Local Users: 348 Max: 1900' [Current Local Users: 348 Max: 1900] +#irc_265: 'moo.us.p2p-network.net' 'Current Local Users: 348 Max: 1900' [Current Local Users: 348 Max: 1900] sub irc_265 { #print "In 265\n"; my $sender = $_[SENDER]; @@ -216,7 +216,7 @@ sub irc_265 { } } -#irc_266: 'moo.us.p2p-network.net' 'Current Global Users: 3552 Max: 8742' [Current Global Users: 3552 Max: 8742] +#irc_266: 'moo.us.p2p-network.net' 'Current Global Users: 3552 Max: 8742' [Current Global Users: 3552 Max: 8742] sub irc_266 { #print "In 266\n"; my $sender = $_[SENDER]; diff --git a/plugins/ircd/ircd 
b/plugins/ircd/ircd index 7770d354..4712403c 100755 --- a/plugins/ircd/ircd +++ b/plugins/ircd/ircd @@ -2,12 +2,12 @@ + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +=cut + +# shellcheck disable=SC1090 +. "${MUNIN_LIBDIR:-.}/plugins/plugin.sh" + +CURL_ARGS='-s' +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + CURL_ARGS='-v' + set -x +fi + +if ! command -v curl >/dev/null; then + echo "curl not found" >&2 + exit 1 +fi +if ! command -v xpath >/dev/null; then + echo "xpath (Perl XML::LibXML) not found" >&2 + exit 1 +fi +if ! command -v bc >/dev/null; then + echo "bc not found" >&2 + exit 1 +fi + +if [ -z "${internode_api_url:-}" ]; then + internode_api_url="https://customer-webtools-api.internode.on.net/api/v1.5" +fi + +xpath_extract() { + # shellcheck disable=SC2039 + local xpath="$1" + # shellcheck disable=SC2039 + local node="$(xpath -q -n -e "${xpath}")" \ + || { echo "error extracting ${xpath}" >&2; false; } + echo "${node}" | sed 's/<\([^>]*\)>\([^<]*\)<[^>]*>/\2/;s^N/A^U^' +} + +xpath_extract_attribute() { + # shellcheck disable=SC2039 + local xpath="$1" + # shellcheck disable=SC2039 + local node="$(xpath -q -n -e "${xpath}")" \ + || { echo "error extracting attribute at ${xpath}" >&2; false; } + echo "${node}" | sed 's/.*="\([^"]\+\)".*/\1/' +} + +fetch() { + # shellcheck disable=SC2154 + curl -u "${internode_api_login}:${internode_api_password}" -f ${CURL_ARGS} "$@" \ + || { echo "error fetching ${*} for user ${internode_api_login}" >&2; false; } +} + +get_cached_api() { + # shellcheck disable=SC2039 + local url=${1} + # shellcheck disable=SC2039 + local name=${2} + # shellcheck disable=SC2039 + local api_data='' + # shellcheck disable=SC2039 + local cachefile="${MUNIN_PLUGSTATE}/$(basename "${0}").${name}.cache" + if [ -n "$(find "${cachefile}" -mmin -1440 2>/dev/null)" ]; then + api_data=$(cat "${cachefile}") + else + api_data="$(fetch "${url}" \ + || true)" + + if [ -n "${api_data}" ]; then + echo "${api_data}" > ${cachefile} + else + echo "using ${name} info from stale cache ${cachefile}" >&2 + api_data=$(cat "${cachefile}") + fi + fi + echo "${api_data}" +} + +get_service_data() { + # Determine the service ID from the name of the symlink + SERVICE_ID="$(echo "${0}" | sed -n 's/^.*internode_usage_//p')" + if [ -z "${SERVICE_ID}" ]; then + # Otherwise, get the first service in the list + API_XML="$(get_cached_api ${internode_api_url} API_XML)" + if [ -z "${API_XML}" ]; then + echo "unable to determine service ID for user ${internode_api_login}" >&2 + exit 1 + fi + SERVICE_ID="$(echo "${API_XML}" | xpath_extract "internode/api/services/service")" + fi + + + CURRENT_TIMESTAMP="$(date +%s)" + SERVICE_USERNAME='n/a' + SERVICE_QUOTA='n/a' + SERVICE_PLAN='n/a' + SERVICE_ROLLOVER='n/a' + IDEAL_USAGE='' + USAGE_CRITICAL='' + SERVICE_XML="$(get_cached_api "${internode_api_url}/${SERVICE_ID}/service" SERVICE_XML \ + || true)" + if [ -n "${SERVICE_XML}" ]; then + SERVICE_USERNAME="$(echo "${SERVICE_XML}" | xpath_extract "internode/api/service/username")" + SERVICE_QUOTA="$(echo "${SERVICE_XML}" | xpath_extract "internode/api/service/quota")" + SERVICE_PLAN="$(echo "${SERVICE_XML}" | xpath_extract "internode/api/service/plan")" + SERVICE_ROLLOVER="$(echo "${SERVICE_XML}" | xpath_extract "internode/api/service/rollover")" + SERVICE_INTERVAL="$(echo "${SERVICE_XML}" | xpath_extract "internode/api/service/plan-interval" | sed 's/ly$//')" + + FIRST_DAY="$(date +%s --date "${SERVICE_ROLLOVER} -1 ${SERVICE_INTERVAL}")" + LAST_DAY="$(date +%s --date "${SERVICE_ROLLOVER}")" + 
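+        # The pro-rata target computed below scales the quota by the elapsed
+        # fraction of the billing period:
+        #   ideal = quota * (now - first_day) / (last_day - first_day)
+        # e.g. 10 days into a 30-day period the ideal usage is a third of the quota.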
BILLING_PERIOD="(${LAST_DAY}-${FIRST_DAY})" + IDEAL_USAGE="$(echo "${SERVICE_QUOTA}-(${SERVICE_QUOTA}*(${LAST_DAY}-${CURRENT_TIMESTAMP})/${BILLING_PERIOD})" | bc -ql)" + + USAGE_CRITICAL="${SERVICE_QUOTA}" + fi + +} + +get_data() { + DAILY_TIMESTAMP=N + DAILY_USAGE=U + HISTORY_XML="$(fetch "${internode_api_url}/${SERVICE_ID}/history" \ + || true)" + if [ -n "${HISTORY_XML}" ]; then + DAILY_USAGE="$(echo "${HISTORY_XML}" | xpath_extract "internode/api/usagelist/usage[last()-1]/traffic")" + DAILY_DATE="$(echo "${HISTORY_XML}" | xpath_extract_attribute "internode/api/usagelist/usage[last()-1]/@day")" + DAILY_TIMESTAMP="$(date -d "${DAILY_DATE} $(date +%H:%M:%S)" +%s \ + || echo N)" + fi + + SERVICE_USAGE='U' + USAGE_XML="$(fetch "${internode_api_url}/${SERVICE_ID}/usage" \ + || true)" + if [ -n "${USAGE_XML}" ]; then + SERVICE_USAGE="$(echo "${USAGE_XML}" | xpath_extract "internode/api/traffic")" + + + fi +} + +graph_config() { + graph="" + if [ -n "${1:-}" ]; then + graph=".$1" + fi + + echo "multigraph internode_usage_${SERVICE_ID}${graph}" + + case "$graph" in + .current) + echo "graph_title Uplink usage rate (hourly)" + echo "graph_info Username: ${SERVICE_USERNAME}; Service ID: ${SERVICE_ID}; Plan: ${SERVICE_PLAN}" + echo 'graph_category network' + # ${graph_period} is not a shell variable + # shellcheck disable=SC2016 + echo 'graph_vlabel bytes per ${graph_period}' + # XXX: this seems to be updated twice per hour; + # the data from this graph may be nonsense + echo 'graph_period hour' + + echo "hourly_rate.label Hourly usage" + echo "hourly_rate.type DERIVE" + echo "hourly_rate.min 0" + + ;; + .daily) + echo "graph_title Uplink usage rate (daily)" + echo "graph_info Username: ${SERVICE_USERNAME}; Service ID: ${SERVICE_ID}; Plan: ${SERVICE_PLAN}" + echo "graph_info Uplink usage rate (daily)" + echo 'graph_category network' + # ${graph_period} is not a shell variable + # shellcheck disable=SC2016 + echo 'graph_vlabel bytes per ${graph_period}' + echo 'graph_period day' + + echo "daily_rate.label Previous-day usage" + echo "daily_rate.type GAUGE" + + ;; + *) + #.usage) + echo "graph_title Uplink usage" + echo "graph_info Username: ${SERVICE_USERNAME}; Service ID: ${SERVICE_ID}; Plan: ${SERVICE_PLAN}" + echo 'graph_category network' + echo 'graph_vlabel bytes' + echo 'graph_period hour' + + echo "usage.label Total usage" + echo "usage.draw AREA" + echo "ideal.extinfo Quota rollover: ${SERVICE_ROLLOVER}" + echo "ideal.label Ideal usage" + echo "ideal.draw LINE" + echo "ideal.colour FFA500" + + echo "usage.critical ${USAGE_CRITICAL}" + echo "usage.warning ${IDEAL_USAGE}" + echo "ideal.critical 0:" + echo "ideal.warning 0:" + ;; + esac + echo +} + +graph_data() { + graph="" + if [ -n "${1:-}" ]; then + graph=".${1}" + fi + + echo "multigraph internode_usage_${SERVICE_ID}${graph}" + case "${graph}" in + .current) + echo "hourly_rate.value ${CURRENT_TIMESTAMP}:${SERVICE_USAGE:-U}" + ;; + .daily) + echo "daily_rate.value ${DAILY_TIMESTAMP}:${DAILY_USAGE:-U}" + ;; + *) + echo "usage.value ${CURRENT_TIMESTAMP}:${SERVICE_USAGE:-U}" + echo "ideal.value ${CURRENT_TIMESTAMP}:${IDEAL_USAGE:-U}" + ;; + esac + echo +} + +main() { + case ${1:-} in + config) + if [ -n "${host_name:-}" ]; then + echo "host_name ${host_name}" + fi + graph_config + graph_config usage + graph_config daily + graph_config current + ;; + *) + get_data + graph_data + graph_data usage + graph_data daily + graph_data current + ;; + esac +} + +get_service_data + +main "${1:-}" diff --git a/plugins/java/jmx/plugin/.classpath 
b/plugins/java/jmx/plugin/.classpath new file mode 100644 index 00000000..fb501163 --- /dev/null +++ b/plugins/java/jmx/plugin/.classpath @@ -0,0 +1,6 @@ + + + + + + diff --git a/plugins/java/jmx/plugin/.gitignore b/plugins/java/jmx/plugin/.gitignore new file mode 100644 index 00000000..f00d6296 --- /dev/null +++ b/plugins/java/jmx/plugin/.gitignore @@ -0,0 +1,3 @@ +/bin/ +/.settings/ +*.class diff --git a/plugins/java/jmx/plugin/.project b/plugins/java/jmx/plugin/.project new file mode 100644 index 00000000..8fe5521c --- /dev/null +++ b/plugins/java/jmx/plugin/.project @@ -0,0 +1,17 @@ + + + plugin + + + + + + org.eclipse.jdt.core.javabuilder + + + + + + org.eclipse.jdt.core.javanature + + diff --git a/plugins/java/jmx/plugin/META-INF/MANIFEST.MF b/plugins/java/jmx/plugin/META-INF/MANIFEST.MF new file mode 100644 index 00000000..bd17a1b5 --- /dev/null +++ b/plugins/java/jmx/plugin/META-INF/MANIFEST.MF @@ -0,0 +1,3 @@ +Manifest-Version: 1.0 +Ant-Version: Apache Ant 1.8.1 +Created-By: 1.6.0_22-b04-307-10M3261 (Apple Inc.) diff --git a/plugins/java/jmx/plugin/src/org/munin/Configuration.java b/plugins/java/jmx/plugin/src/org/munin/Configuration.java new file mode 100644 index 00000000..5e4b5d8e --- /dev/null +++ b/plugins/java/jmx/plugin/src/org/munin/Configuration.java @@ -0,0 +1,148 @@ +package org.munin; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.io.IOException; +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; + +public class Configuration { + private Properties graph_properties = new Properties(); + private Map fieldMap = new HashMap(); + private List fields = new ArrayList(); + + public class FieldProperties extends Properties { + private static final long serialVersionUID = 1L; + private ObjectName jmxObjectName; + private String jmxAttributeName; + private String jmxAttributeKey; + private String fieldname; + private static final String JMXOBJECT = "jmxObjectName"; + private static final String JMXATTRIBUTE = "jmxAttributeName"; + private static final String JMXATTRIBUTEKEY = "jmxAttributeKey"; + + public FieldProperties(Configuration paramConfiguration, + String fieldname) { + this.fieldname = fieldname; + } + + public String getJmxAttributeKey() { + return jmxAttributeKey; + } + + public String getJmxAttributeName() { + return jmxAttributeName; + } + + public ObjectName getJmxObjectName() { + return jmxObjectName; + } + + public String toString() { + return fieldname; + } + + public void set(String key, String value) + throws MalformedObjectNameException, NullPointerException { + if ("jmxObjectName".equals(key)) { + if (jmxObjectName != null) + throw new IllegalStateException( + "jmxObjectName already set for " + this); + jmxObjectName = new ObjectName(value); + } else if ("jmxAttributeName".equals(key)) { + if (jmxAttributeName != null) + throw new IllegalStateException( + "jmxAttributeName already set for " + this); + jmxAttributeName = value; + } else if ("jmxAttributeKey".equals(key)) { + if (jmxAttributeKey != null) + throw new IllegalStateException( + "jmxAttributeKey already set for " + this); + jmxAttributeKey = value; + } else { + put(key, value); + } + } + + public void report(PrintStream out) { + for (Iterator it = entrySet().iterator(); it.hasNext();) { + Map.Entry entry = (Map.Entry) it.next(); + 
out.println(fieldname + '.' + entry.getKey() + " " + + entry.getValue()); + } + } + + public String getFieldname() { + return fieldname; + } + } + + public static Configuration parse(String config_file) throws IOException, + MalformedObjectNameException, NullPointerException { + BufferedReader reader = new BufferedReader(new FileReader(config_file)); + Configuration configuration = new Configuration(); + try { + for (;;) { + String s = reader.readLine(); + if (s == null) + break; + if ((!s.startsWith("%")) && (s.length() > 5) + && (!s.startsWith(" "))) { + configuration.parseString(s); + } + } + } finally { + reader.close(); + } + + return configuration; + } + + private void parseString(String s) throws MalformedObjectNameException, + NullPointerException { + String[] nameval = s.split(" ", 2); + if (nameval[0].indexOf('.') > 0) { + String name = nameval[0]; + String fieldname = name.substring(0, name.lastIndexOf('.')); + if (!fieldMap.containsKey(fieldname)) { + Configuration.FieldProperties field = new Configuration.FieldProperties( + this, fieldname); + fieldMap.put(fieldname, field); + fields.add(field); + } + Configuration.FieldProperties field = (Configuration.FieldProperties) fieldMap + .get(fieldname); + String key = name.substring(name.lastIndexOf('.') + 1); + field.set(key, nameval[1]); + } else { + graph_properties.put(nameval[0], nameval[1]); + } + } + + public Properties getGraphProperties() { + return graph_properties; + } + + public void report(PrintStream out) { + for (Iterator it = graph_properties.entrySet().iterator(); it.hasNext();) { + Map.Entry entry = (Map.Entry) it.next(); + out.println(entry.getKey() + " " + entry.getValue()); + } + + for (Configuration.FieldProperties field : fields) { + field.report(out); + } + } + + public List getFields() { + return fields; + } +} diff --git a/plugins/java/jmx/plugin/src/org/munin/JMXQuery.java b/plugins/java/jmx/plugin/src/org/munin/JMXQuery.java new file mode 100644 index 00000000..0ac91905 --- /dev/null +++ b/plugins/java/jmx/plugin/src/org/munin/JMXQuery.java @@ -0,0 +1,228 @@ +package org.munin; + +import java.io.IOException; +import java.text.NumberFormat; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanInfo; +import javax.management.MBeanServerConnection; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; + +public class JMXQuery { + private static final String USERNAME_KEY = "username"; + private static final String PASSWORD_KEY = "password"; + public static final String USAGE = "Usage of program is:\njava -cp jmxquery.jar org.munin.JMXQuery --url= [--user= --pass=] [--conf= [config]]\n, where is a JMX URL, for example: service:jmx:rmi:///jndi/rmi://HOST:PORT/jmxrmi\nWhen invoked with the config file (see examples folder) - operates as Munin plugin with the provided configuration\nWithout options just fetches all JMX attributes using provided URL"; + private String url; + private String username; + private String password; + private JMXConnector connector; + 
private MBeanServerConnection connection; + private Configuration config; + + public JMXQuery(String url) { + this(url, null, null); + } + + public JMXQuery(String url, String username, String password) { + this.url = url; + this.username = username; + this.password = password; + } + + private void connect() throws IOException { + Map environment = null; + if ((username != null) && (password != null)) { + environment = new HashMap(); + + environment.put("jmx.remote.credentials", new String[] { username, + password }); + environment.put("username", username); + environment.put("password", password); + } + + JMXServiceURL jmxUrl = new JMXServiceURL(url); + connector = JMXConnectorFactory.connect(jmxUrl, environment); + connection = connector.getMBeanServerConnection(); + } + + private void list() throws IOException, InstanceNotFoundException, + IntrospectionException, ReflectionException { + if (config == null) { + listAll(); + } else { + listConfig(); + } + } + + private void listConfig() { + for (Configuration.FieldProperties field : config.getFields()) { + try { + Object value = connection.getAttribute( + field.getJmxObjectName(), field.getJmxAttributeName()); + output(field.getFieldname(), value, field.getJmxAttributeKey()); + } catch (Exception e) { + System.err.println("Fail to output " + field); + e.printStackTrace(); + } + } + } + + private void output(String name, Object attr, String key) { + if ((attr instanceof CompositeDataSupport)) { + CompositeDataSupport cds = (CompositeDataSupport) attr; + if (key == null) { + throw new IllegalArgumentException( + "Key is null for composed data " + name); + } + System.out.println(name + ".value " + format(cds.get(key))); + } else { + System.out.println(name + ".value " + format(attr)); + } + } + + private void output(String name, Object attr) { + CompositeDataSupport cds; + Iterator it; + if ((attr instanceof CompositeDataSupport)) { + cds = (CompositeDataSupport) attr; + for (it = cds.getCompositeType().keySet().iterator(); it.hasNext();) { + String key = it.next().toString(); + System.out.println(name + "." 
+ key + ".value " + + format(cds.get(key))); + } + } else { + System.out.println(name + ".value " + format(attr)); + } + } + + private void listAll() throws IOException, InstanceNotFoundException, + IntrospectionException, ReflectionException { + Set mbeans = connection.queryNames(null, null); + for (ObjectName name : mbeans) { + MBeanInfo info = connection.getMBeanInfo(name); + MBeanAttributeInfo[] attrs = info.getAttributes(); + String[] attrNames = new String[attrs.length]; + for (int i = 0; i < attrs.length; i++) { + attrNames[i] = attrs[i].getName(); + } + try { + AttributeList attributes = connection.getAttributes(name, + attrNames); + for (Attribute attribute : attributes.asList()) { + output(name.getCanonicalName() + "%" + attribute.getName(), + attribute.getValue()); + } + } catch (Exception e) { + System.err.println("error getting " + name + ":" + + e.getMessage()); + } + } + } + + private String format(Object value) { + if (value == null) + return null; + if ((value instanceof String)) + return (String) value; + if ((value instanceof Number)) { + NumberFormat f = NumberFormat.getInstance(); + f.setMaximumFractionDigits(2); + f.setGroupingUsed(false); + return f.format(value); + } + if ((value instanceof Object[])) { + return Integer.toString(Arrays.asList((Object[]) value).size()); + } + return value.toString(); + } + + private void disconnect() throws IOException { + connector.close(); + } + + public static void main(String[] args) { + int arglen = args.length; + if (arglen < 1) { + System.err + .println("Usage of program is:\njava -cp jmxquery.jar org.munin.JMXQuery --url= [--user= --pass=] [--conf= [config]]\n, where is a JMX URL, for example: service:jmx:rmi:///jndi/rmi://HOST:PORT/jmxrmi\nWhen invoked with the config file (see examples folder) - operates as Munin plugin with the provided configuration\nWithout options just fetches all JMX attributes using provided URL"); + System.exit(1); + } + + String url = null; + String user = null; + String pass = null; + String config_file = null; + boolean toconfig = false; + for (int i = 0; i < arglen; i++) { + if (args[i].startsWith("--url=")) { + url = args[i].substring(6); + } else if (args[i].startsWith("--user=")) { + user = args[i].substring(7); + } else if (args[i].startsWith("--pass=")) { + pass = args[i].substring(7); + } else if (args[i].startsWith("--conf=")) { + config_file = args[i].substring(7); + } else if (args[i].equals("config")) { + toconfig = true; + } + } + + if ((url == null) || ((user != null) && (pass == null)) + || ((user == null) && (pass != null)) + || ((config_file == null) && (toconfig))) { + System.err + .println("Usage of program is:\njava -cp jmxquery.jar org.munin.JMXQuery --url= [--user= --pass=] [--conf= [config]]\n, where is a JMX URL, for example: service:jmx:rmi:///jndi/rmi://HOST:PORT/jmxrmi\nWhen invoked with the config file (see examples folder) - operates as Munin plugin with the provided configuration\nWithout options just fetches all JMX attributes using provided URL"); + System.exit(1); + } + + if (toconfig) { + try { + Configuration.parse(config_file).report(System.out); + } catch (Exception e) { + System.err.println(e.getMessage() + " reading " + config_file); + System.exit(1); + } + } else { + JMXQuery query = new JMXQuery(url, user, pass); + try { + query.connect(); + if (config_file != null) { + query.setConfig(Configuration.parse(config_file)); + } + query.list(); + } catch (Exception ex) { + System.err.println(ex.getMessage() + " querying " + url); + ex.printStackTrace(); + 
System.exit(1); + } finally { + try { + query.disconnect(); + } catch (IOException e) { + System.err.println(e.getMessage() + " closing " + url); + } + } + } + } + + private void setConfig(Configuration configuration) { + config = configuration; + } + + public Configuration getConfig() { + return config; + } +} diff --git a/plugins/jchkmail/jchkmail_counters_ b/plugins/jchkmail/jchkmail_counters_ old mode 100644 new mode 100755 index ef30fa96..78c459c1 --- a/plugins/jchkmail/jchkmail_counters_ +++ b/plugins/jchkmail/jchkmail_counters_ @@ -39,7 +39,7 @@ The following environment variables may be defined : =head1 COPYRIGHT Copyright Jose-Marcio Martins da Cruz -=head1 VERSION +=head1 VERSION 1.0 - Feb 2014 @@ -99,7 +99,7 @@ if (exists $ENV{statefile} && defined $ENV{statefile}) { # # if ($#ARGV >= 0 && $ARGV[0] eq "autoconf") { - + unless (-f $SMCF) { print "no\n"; exit 0; @@ -433,7 +433,7 @@ sub SaveState { printf FSTATE "%-20s %s\n", $k, $h->{$k}; } close FSTATE; - + Munin::Plugin::save_state(%$h); return 1; } @@ -516,7 +516,7 @@ __DATA__ conn Connections conn.info SMTP Connections per time unit msgs Messages -msgs.info Messages per time unit +msgs.info Messages per time unit rcpt Recipients rcpt.info Recipients per time unit @@ -556,8 +556,8 @@ kbytes.info Volume (KBytes) per time unit j-greyvalid Grey Validated records j-greyvalid.info Grey Validated records -j-greypend Grey Waiting records -j-greypend.info Grey Waiting records +j-greypend Grey Waiting records +j-greypend.info Grey Waiting records j-greywhitelist Grey Whitelisted records j-greywhitelist.info Grey Whitelisted records j-greyblacklist j-greyblacklist diff --git a/plugins/jenkins/jenkins_ b/plugins/jenkins/jenkins_ old mode 100644 new mode 100755 index 9680030a..27fe1958 --- a/plugins/jenkins/jenkins_ +++ b/plugins/jenkins/jenkins_ @@ -13,7 +13,7 @@ This plugin displays the following charts: 2) Number of Jobs in the Build Queue 3) Number of Builds, currently running -You can set the modes with naming the softlink: +You can set the modes with naming the symlink: 1) jenkins_results 2) jenkins_queue @@ -21,13 +21,14 @@ You can set the modes with naming the softlink: =head1 CONFIGURATION -This plugin is configurable environment variables. +This plugin is configurable via environment variables. -env.url Jenkins Host -env.port Jenkins Port -env.context Jenkins Context path -env.user User for the API Tokent -env.apiToken Jenkins API Token (see https://wiki.jenkins-ci.org/display/JENKINS/Authenticating+scripted+clients) +env.url Jenkins Host +env.port Jenkins Port +env.context Jenkins Context path +env.user User for the API Tokent +env.apiToken Jenkins API Token (see https://wiki.jenkins-ci.org/display/JENKINS/Authenticating+scripted+clients) +env.jobDepth How far into job "folders" should the plugin check for jobs Example: @@ -54,6 +55,7 @@ use warnings; use strict; use JSON; use File::Basename; +use URI; # VARS my $url = ($ENV{'url'} || 'localhost'); @@ -61,6 +63,7 @@ my $port = ($ENV{'port'} || '4040'); my $user = ($ENV{'user'} || ''); my $apiToken = ($ENV{'apiToken'} || ''); my $context = ($ENV{'context'} || ''); +my $jobDepth = ($ENV{'jobDepth'} || 1); my $wgetBin = "/usr/bin/wget"; my $type = basename($0); @@ -79,92 +82,110 @@ my %states = ( 'aborted'=>'failing', 'aborted_anime'=>'failing' ); -my %counts = ('stable' => 0, 'unstable'=>0, 'failing'=>0, 'disabled'=>0); +my $auth = ( $user ne "" and $apiToken ne "" ? 
" --auth-no-challenge --user=$user --password=$apiToken" : "" ); if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { if( $type eq "results" ) { - print "graph_args --base 1000 -l 0\n"; - print "graph_title Jenkins Build Results\n"; - print "graph_vlabel Build Results\n"; - print "graph_category devel\n"; - print "graph_info The Graph shows the Status of each Build\n"; - print "build_disabled.draw AREA\n"; - print "build_disabled.label disabled\n"; - print "build_disabled.type GAUGE\n"; - print "build_disabled.colour 8A8A8A\n"; - print "build_failing.draw STACK\n"; - print "build_failing.label failing\n"; - print "build_failing.type GAUGE\n"; - print "build_failing.colour E61217\n"; - print "build_unstable.draw STACK\n"; - print "build_unstable.label unstable\n"; - print "build_unstable.type GAUGE\n"; - print "build_unstable.colour F3E438\n"; - print "build_stable.draw STACK\n"; - print "build_stable.label stable\n"; - print "build_stable.type GAUGE\n"; - print "build_stable.colour 294D99\n"; - exit; - } - if( $type eq "queue" ) { print "graph_args --base 1000 -l 0\n"; - print "graph_title Jenkins Queue Length\n"; - print "graph_vlabel Number of Jobs in Queue\n"; - print "graph_category devel\n"; - print "graph_info The Graph shows the Number of Jobs in the Build Queue\n"; - print "build_count.label Jobs in Queue\n"; - print "build_count.type GAUGE\n"; - exit; - } - if( $type eq "running" ) { + print "graph_title Jenkins Build Results\n"; + print "graph_vlabel Build Results\n"; + print "graph_category devel\n"; + print "graph_info The Graph shows the Status of each Build\n"; + print "build_disabled.draw AREA\n"; + print "build_disabled.label disabled\n"; + print "build_disabled.type GAUGE\n"; + print "build_disabled.colour 8A8A8A\n"; + print "build_failing.draw STACK\n"; + print "build_failing.label failing\n"; + print "build_failing.type GAUGE\n"; + print "build_failing.colour E61217\n"; + print "build_unstable.draw STACK\n"; + print "build_unstable.label unstable\n"; + print "build_unstable.type GAUGE\n"; + print "build_unstable.colour F3E438\n"; + print "build_stable.draw STACK\n"; + print "build_stable.label stable\n"; + print "build_stable.type GAUGE\n"; + print "build_stable.colour 294D99\n"; + } elsif( $type eq "queue" ) { print "graph_args --base 1000 -l 0\n"; - print "graph_title Jenkins Builds Running\n"; - print "graph_vlabel Builds currently running\n"; - print "graph_category devel\n"; - print "graph_info The Graph shows the Number of Builds, currently running\n"; - print "build_running.label running Builds\n"; - print "build_running.type GAUGE\n"; - exit; - } + print "graph_title Jenkins Queue Length\n"; + print "graph_vlabel Number of Jobs in Queue\n"; + print "graph_category devel\n"; + print "graph_info The Graph shows the Number of Jobs in the Build Queue\n"; + print "build_count.label Jobs in Queue\n"; + print "build_count.type GAUGE\n"; + } elsif( $type eq "running" ) { + print "graph_args --base 1000 -l 0\n"; + print "graph_title Jenkins Builds Running\n"; + print "graph_vlabel Builds currently running\n"; + print "graph_category devel\n"; + print "graph_info The Graph shows the Number of Builds, currently running\n"; + print "build_running.label running Builds\n"; + print "build_running.type GAUGE\n"; + } else { + warn "Unknown mode requested: $type\n"; + } } else { - # CODE - my $auth = ( $user ne "" and $apiToken ne "" ? 
" --auth-no-challenge --user=$user --password=$apiToken" : "" ); - my $cmd = "$wgetBin $auth -qO- $url:$port$context"; - + my $cmd = "$wgetBin $auth -qO- $url:$port$context"; + + my $tree = 'jobs[name,color]'; + for (2..$jobDepth) { + $tree = "jobs[name,color,$tree]"; + } + if( $type eq "results" ) { - my $result = `$cmd/api/json`; + my $result = `$cmd'/api/json?depth=$jobDepth&tree=$tree'`; my $parsed = decode_json($result); - foreach my $cur(@{$parsed->{'jobs'}}) { - if (defined $states{$cur->{'color'}}) { + my $counts = parse_results($parsed->{'jobs'}); + + foreach my $status (keys %{$counts}) { + print "build_$status.value $counts->{$status}\n"; + } + } elsif( $type eq "running" ) { + my $result = `$cmd'/api/json?depth=$jobDepth&tree=$tree'`; + my $parsed = decode_json($result); + my $count = parse_running_builds($parsed->{'jobs'}); + print "build_running.value ", $count, "\n"; + } elsif( $type eq "queue" ) { + my $result = `$cmd/queue/api/json`; + my $parsed = decode_json($result); + print "build_count.value ", scalar( @{$parsed->{'items'}} ), "\n"; + } else { + warn "Unknown mode requested: $type\n"; + } +} + +sub parse_running_builds { + my $builds = shift; + my $count = 0; + foreach my $cur (@{$builds}) { + if( defined($cur->{'jobs'}) ) { + $count += parse_running_builds($cur->{'jobs'}); + } elsif( defined ($cur->{'color'}) and $cur->{'color'} =~ /anime$/ ) { + $count += 1; + } + } + return $count; +} + +sub parse_results { + my $builds = shift; + my %counts = ('stable' => 0, 'unstable' => 0, 'failing' => 0, 'disabled' => 0); + + foreach my $cur(@{$builds}) { + if( defined($cur->{'jobs'}) ) { + my $new_counts = parse_results($cur->{'jobs'}); + foreach my $new_count_key (keys %{$new_counts}) { + $counts{$new_count_key} += $new_counts->{$new_count_key}; + } + } elsif (defined($cur->{'color'})) { + if (defined($states{$cur->{'color'}})) { $counts{$states{$cur->{'color'}}} += 1; } else { warn "Ignoring unknown color " . $cur->{'color'} . 
"\n" } } - - foreach my $status (keys %counts) { - print "build_$status.value $counts{$status}\n"; - } - exit; - } - - if( $type eq "running" ) { - my $count = 0; - my $result = `$cmd/api/json`; - my $parsed = decode_json($result); - foreach my $cur(@{$parsed->{'jobs'}}) { - if( $cur->{'color'} =~ /anime$/ ) { - $count += 1; - } - } - print "build_running.value ", $count, "\n"; - exit; - } - - if( $type eq "queue" ) { - my $result = `$cmd/queue/api/json`; - my $parsed = decode_json($result); - print "build_count.value ", scalar( @{$parsed->{'items'}} ), "\n"; - exit; } + return \%counts; } diff --git a/plugins/jenkins/jenkins_nodes_ b/plugins/jenkins/jenkins_nodes_ old mode 100644 new mode 100755 index 8293caed..ab18cc20 --- a/plugins/jenkins/jenkins_nodes_ +++ b/plugins/jenkins/jenkins_nodes_ @@ -51,7 +51,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; if( $lcount > 0 ){ @@ -77,7 +77,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; if( $lcount > 0 ){ @@ -103,7 +103,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; if( $lcount > 0 ){ @@ -130,7 +130,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my %archs = (); my $cat; foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { $cat = $cur->{'monitorData'}{'hudson.node_monitors.ArchitectureMonitor'}; if (exists $archs{$cat} ) {} else { $archs{$cat} = 0; @@ -157,15 +157,15 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { print "graph_vlabel Executors\n"; print "graph_info The Graph shows the Jenkins Executors\n"; print "executors_offline.label offline\n"; - print "executors_offline.type GAUGE\n"; + print "executors_offline.type GAUGE\n"; print "executors_offline.draw AREA\n"; - print "executors_offline.colour 8A8A8A\n"; + print "executors_offline.colour 8A8A8A\n"; print "executors_busy.label busy\n"; - print "executors_busy.type GAUGE\n"; + print "executors_busy.type GAUGE\n"; print "executors_busy.draw STACK\n"; print "executors_idle.label idle\n"; - print "executors_idle.type GAUGE\n"; - print "executors_idle.draw STACK\n"; + print "executors_idle.type GAUGE\n"; + print "executors_idle.draw STACK\n"; } } else { @@ -178,7 +178,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $total_mem = 0; my $used_mem = 0; foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { $monitor = $cur->{'monitorData'}{'hudson.node_monitors.SwapSpaceMonitor'}; $avail_mem += $monitor->{'availablePhysicalMemory'}; $total_mem += $monitor->{'totalPhysicalMemory'}; @@ -193,7 +193,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { 
$monitor = $cur->{'monitorData'}{'hudson.node_monitors.SwapSpaceMonitor'}; my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; @@ -206,7 +206,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { $monitor = $cur->{'monitorData'}{'hudson.node_monitors.TemporarySpaceMonitor'}; my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; @@ -219,7 +219,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $result = `$cmd/computer/api/json`; my $parsed = decode_json($result); foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { $monitor = $cur->{'monitorData'}{'hudson.node_monitors.DiskSpaceMonitor'}; my $cat = $cur->{'displayName'}; $cat =~ s/\./\_/g; @@ -233,7 +233,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my %archs = (); my $cat; foreach my $cur(@{$parsed->{'computer'}}) { - if( $cur->{'offline'} =~ /false$/ ) { + if( !$cur->{'offline'} ) { $cat = $cur->{'monitorData'}{'hudson.node_monitors.ArchitectureMonitor'}; if (exists $archs{$cat} ) { $archs{$cat} += 1; @@ -249,8 +249,8 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { } if( $type eq "executors" ) { - my $busyExecutors = $parsed->{'busyExecutors'}; - my $totalExecutors = 0; + my $busyExecutors = $parsed->{'busyExecutors'}; + my $totalExecutors = 0; my $offlineExecutors = 0; foreach my $cur(@{$parsed->{'computer'}}) { $totalExecutors += $cur->{'numExecutors'}; @@ -258,9 +258,9 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { $offlineExecutors += $cur->{'numExecutors'}; } } - print "executors_idle.value ", ($totalExecutors - $busyExecutors - $offlineExecutors), "\n"; - print "executors_busy.value $busyExecutors\n"; - print "executors_offline.value $offlineExecutors\n"; + print "executors_idle.value ", ($totalExecutors - $busyExecutors - $offlineExecutors), "\n"; + print "executors_busy.value $busyExecutors\n"; + print "executors_offline.value $offlineExecutors\n"; } } diff --git a/plugins/jitsi/jitsi_videobridge b/plugins/jitsi/jitsi_videobridge new file mode 100755 index 00000000..51e434d5 --- /dev/null +++ b/plugins/jitsi/jitsi_videobridge @@ -0,0 +1,135 @@ +#!/bin/sh + +set -e + +: << =cut + +=head1 NAME + +jitsi_videobridge - Monitor sessions and conferences +on a jitsi-videobridge + +=head1 APPLICABLE SYSTEMS + +Jitsi-Videobridge instances + +=head1 CONFIGURATION + +Requires enabled colibri statistics on jitsi-videobridge and an +installed jq, a command-line json processor. + +You may specify the URL where to get the statistics + +[jitsi_videobridge] +env.url http://127.0.0.1:8080/colibri/stats + +... and you may disable the audiochannel when you don't use an +audio gateway. + +=head1 AUTHOR + +Copyright (C) 2020 Sebastian L. (https://momou.ch) + +=head1 LICENSE + +GPLv2 + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" + +COLIBRI_URL=${url:-"http://127.0.0.1:8080/colibri/stats"} + +case $1 in + + autoconf) + if [ -x /usr/bin/curl ]; then + if [ -x /usr/bin/jq ]; then + curl -s -f -m 2 -I "$COLIBRI_URL" | grep -iq "Content-Type: application/json" && echo "yes" && exit 0 || echo "no (invalid or empty response from colibri API ($COLIBRI_URL))" && exit 0 + else + echo "no (jq not found)" && exit 0 + fi + else + echo "no (/usr/bin/curl not found)" && exit 0 + fi + ;; + + config) + echo "multigraph jitsi_videobridge_sessions" + echo "graph_title Current jitsi-videobridge sessions" + echo "graph_info Current jitsi-videobridge sessions" + echo "graph_vlabel current sessions" + echo "graph_args --base 1000 -l 0" + echo "jitsi_videochannels.label videochannels" + echo "jitsi_videochannels.info current number of videochannels" + echo "jitsi_videochannels.min 0" + echo "jitsi_videochannels.draw AREASTACK" + echo "jitsi_audiochannels.label audiochannels" + echo "jitsi_audiochannels.info current number of audiochannels" + echo "jitsi_audiochannels.min 0" + echo "jitsi_audiochannels.draw AREASTACK" + echo "jitsi_conferences.label conferences" + echo "jitsi_conferences.info current number of conferences" + echo "jitsi_conferences.min 0" + echo "jitsi_conferences.draw LINE2" + echo "jitsi_participants.label participants" + echo "jitsi_participants.info current number of participants" + echo "jitsi_participants.min 0" + echo "jitsi_participants.draw LINE2" + echo "multigraph jitsi_videobridge_streams" + echo "graph_title Current jitsi-videobridge videostreams" + echo "graph_info Current jitsi-videobridge videostreams" + echo "graph_vlabel current videostreams" + echo "graph_args --base 1000 -l 0" + echo "jitsi_videostreams.label videostreams" + echo "jitsi_videostreams.info current number of videostreams" + echo "jitsi_videostreams.min 0" + echo "multigraph jitsi_videobridge_conferences" + echo "graph_title Total of jitsi-videobridge conferences" + echo "graph_info Total of jitsi-videobridge conferences" + echo "graph_vlabel conferences" + echo "graph_args --base 1000 -l 0" + echo "jitsi_total_conferences_created.label created conferences" + echo "jitsi_total_conferences_created.info total of created conferences" + echo "jitsi_total_conferences_created.min 0" + echo "jitsi_total_conferences_created.draw AREA" + echo "jitsi_total_conferences_completed.label completed conferences" + echo "jitsi_total_conferences_completed.info total of completed conferences" + echo "jitsi_total_conferences_completed.min 0" + echo "jitsi_total_conferences_completed.draw AREASTACK" + echo "jitsi_total_partially_failed_conferences.label partially failed conferences" + echo "jitsi_total_partially_failed_conferences.info total of partially failed conferences" + echo "jitsi_total_partially_failed_conferences.min 0" + echo "jitsi_total_partially_failed_conferences.draw AREASTACK" + echo "jitsi_total_failed_conferences.label failed conferences" + echo "jitsi_total_failed_conferences.info total of failed conferences" + echo "jitsi_total_failed_conferences.min 0" + echo "jitsi_total_failed_conferences.draw AREASTACK" + + exit 0 + ;; + +esac + +JSONSTATS=$(curl -s -f -m 2 "$COLIBRI_URL") + +echo "multigraph jitsi_videobridge_sessions" +for KEY in videochannels audiochannels conferences participants; do + VALUE=$(echo "$JSONSTATS" | jq -r ".$KEY // \"U\"") + echo "jitsi_${KEY}.value $VALUE" +done + +echo "multigraph jitsi_videobridge_streams" +VALUE=$(echo "$JSONSTATS" | jq -r ".videostreams // \"U\"") +echo 
"jitsi_videostreams.value $VALUE" + +echo "multigraph jitsi_videobridge_conferences" +for KEY in total_conferences_created total_failed_conferences total_partially_failed_conferences total_conferences_completed; do + VALUE=$(echo "$JSONSTATS" | jq -r ".$KEY // \"U\"") + echo "jitsi_${KEY}.value $VALUE" +done diff --git a/plugins/jmx/examples/java/java_cpu.conf b/plugins/jmx/examples/java/java_cpu.conf index 1e56cd34..852f466e 100644 --- a/plugins/jmx/examples/java/java_cpu.conf +++ b/plugins/jmx/examples/java/java_cpu.conf @@ -2,7 +2,7 @@ graph_args --upper-limit 100 -l 0 graph_scale no graph_title CPU Usage graph_vlabel 1000* CPU time % -graph_category Java +graph_category appserver graph_order java_cpu_time java_cpu_user_time java_cpu_time.label cpu diff --git a/plugins/jmx/examples/java/java_process_memory.conf b/plugins/jmx/examples/java/java_process_memory.conf index ddbdc4d0..3885f791 100644 --- a/plugins/jmx/examples/java/java_process_memory.conf +++ b/plugins/jmx/examples/java/java_process_memory.conf @@ -1,6 +1,6 @@ graph_title Process Memory graph_vlabel Bytes -graph_category Java +graph_category appserver graph_order java_memory_nonheap_committed java_memory_nonheap_max java_memory_nonheap_used java_memory_heap_committed java_memory_heap_max java_memory_heap_used os_memory_physical os_memory_vm java_memory_nonheap_committed.label non-heap committed @@ -42,5 +42,3 @@ os_memory_vm.label os vmem committed os_memory_vm.jmxObjectName java.lang:type=OperatingSystem os_memory_vm.jmxAttributeName CommittedVirtualMemorySize os_memory_vm.graph no - - diff --git a/plugins/jmx/examples/java/java_threads.conf b/plugins/jmx/examples/java/java_threads.conf index f8b64cd4..06e39b3d 100644 --- a/plugins/jmx/examples/java/java_threads.conf +++ b/plugins/jmx/examples/java/java_threads.conf @@ -1,6 +1,6 @@ graph_title Thread Count graph_vlabel Thread Count -graph_category Java +graph_category appserver graph_order java_thread_count java_thread_count_peak java_thread_count.label count @@ -10,5 +10,3 @@ java_thread_count.jmxAttributeName ThreadCount java_thread_count_peak.label peak java_thread_count_peak.jmxObjectName java.lang:type=Threading java_thread_count_peak.jmxAttributeName PeakThreadCount - - diff --git a/plugins/jmx/examples/tomcat/catalina_requests.conf b/plugins/jmx/examples/tomcat/catalina_requests.conf index acb48175..e6f8629e 100644 --- a/plugins/jmx/examples/tomcat/catalina_requests.conf +++ b/plugins/jmx/examples/tomcat/catalina_requests.conf @@ -1,6 +1,6 @@ graph_title Requests Per Second graph_vlabel requests per second -graph_category Tomcat +graph_category appserver graph_order catalina_request_count catalina_error_count catalina_request_count.label requests diff --git a/plugins/jmx/examples/tomcat/catalina_threads.conf b/plugins/jmx/examples/tomcat/catalina_threads.conf index 32ab44c2..c6e86f03 100644 --- a/plugins/jmx/examples/tomcat/catalina_threads.conf +++ b/plugins/jmx/examples/tomcat/catalina_threads.conf @@ -1,6 +1,6 @@ graph_title Thread Count graph_vlabel Thread Count -graph_category Tomcat +graph_category appserver graph_order catalina_threads_count catalina_threads_busy catalina_threads_busy.label busy diff --git a/plugins/jmx/examples/tomcat/catalina_times.conf b/plugins/jmx/examples/tomcat/catalina_times.conf index e3790259..4e0572b7 100644 --- a/plugins/jmx/examples/tomcat/catalina_times.conf +++ b/plugins/jmx/examples/tomcat/catalina_times.conf @@ -1,9 +1,8 @@ graph_title Response Time graph_vlabel Time, ms -graph_category Rules Engine +graph_category appserver 
graph_args --upper-limit 100 -l 0 graph_scale no -graph_category Tomcat graph_order catalina_request_count catalina_max_time catalina_proc_time catalina_proc_tpr catalina_request_count.label requests @@ -11,7 +10,7 @@ catalina_request_count.jmxObjectName Catalina:name=http-8080,type=GlobalRequestP catalina_request_count.jmxAttributeName requestCount catalina_request_count.graph no catalina_request_count.type DERIVE -catalina_request_count.min 0 +catalina_request_count.min 0 catalina_proc_time.label time catalina_proc_time.jmxObjectName Catalina:name=http-8080,type=GlobalRequestProcessor @@ -28,5 +27,3 @@ catalina_proc_tpr.cdef catalina_request_count,0,EQ,0,catalina_proc_time,catalina catalina_max_time.label peak catalina_max_time.jmxObjectName Catalina:name=http-8080,type=GlobalRequestProcessor catalina_max_time.jmxAttributeName maxTime - - diff --git a/plugins/jmx/examples/tomcat/catalina_traffic.conf b/plugins/jmx/examples/tomcat/catalina_traffic.conf index f6d9498d..0c6b8817 100644 --- a/plugins/jmx/examples/tomcat/catalina_traffic.conf +++ b/plugins/jmx/examples/tomcat/catalina_traffic.conf @@ -1,6 +1,6 @@ graph_title Traffic graph_vlabel Bytes rec(-)/sent(+) per second -graph_category Tomcat +graph_category appserver graph_order catalina_bytes_received catalina_bytes_sent catalina_bytes_sent.label bps diff --git a/plugins/jmx/readme.txt b/plugins/jmx/readme.txt index cae58a28..20f1e77d 100644 --- a/plugins/jmx/readme.txt +++ b/plugins/jmx/readme.txt @@ -5,7 +5,7 @@ As soon as JMX embedded in Java 5, any Java process may expose parameters to be look http://java.sun.com/j2se/1.5.0/docs/guide/management/agent.html and http://java.sun.com/jmx for details In Java version < 5 it is still possible to expose JMX interface using third party libraries -To see what can be monitored by JMX, run /bin/jconsole.exe and connect to +To see what can be monitored by JMX, run /bin/jconsole.exe and connect to the host/port you setup in your Java process. Some examples are: @@ -41,7 +41,7 @@ env.jmxurl service:jmx:rmi:///jndi/rmi://localhost:1616/jmxrmi To check that all installed properly, try invoke plugins from command line, using links like: root@re:/etc/munin/plugins# ./jmx_java_process_memory config -graph_category Java +graph_category appserver ... root@re:/etc/munin/plugins# ./jmx_java_process_memory java_memory_nonheap_committed.value 35291136 diff --git a/plugins/joomla/joomla-sessions b/plugins/joomla/joomla-sessions index e93f38dd..a75db535 100755 --- a/plugins/joomla/joomla-sessions +++ b/plugins/joomla/joomla-sessions @@ -90,7 +90,7 @@ EOC #Anonym Session count my $anonsessions = `$MYSQL $MYSQLOPTS -e 'SELECT COUNT( session_id ) FROM $DATABASE.jos_session WHERE usertype = "''"'`; -$anonsessions =~ /(\d+)/; +$anonsessions =~ /(\d+)/; print "anonsessions.value ".$1."\n"; #Registered count diff --git a/plugins/jvm/jstat__gccount b/plugins/jvm/jstat__gccount index 165b4118..40215d50 100755 --- a/plugins/jvm/jstat__gccount +++ b/plugins/jvm/jstat__gccount @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # # Plugin for monitor JVM activity - GC Count - # @@ -22,9 +22,11 @@ # # Target: # -# Target Java Virtual Machine to monitor are: -# Sun JDK 5.0 (http://java.sun.com/javase/) (default) -# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) +# Target Java Virtual Machine to monitor are: +# Sun JDK 5.0 (http://java.sun.com/javase/) +# Sun JDK 8.0 (http://java.sun.com/javase/) +# OpenJDK 1.7 .. 
11 (https://openjdk.java.net/) +# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) # # Parameters: # @@ -33,140 +35,73 @@ # Config variables: # # pidfilepath - Which file path use. Defaults to '/var/run/jsvc.pid' -# javahome - Defaults to '/usr/local/java/jdk' +# javahome - override automatic detection of JRE directory +# graphtitle - Title of the graph (defaults to PID file location) # -DefaultPidFile="/var/run/jsvc.pid" -DefaultJavaHome="/usr/local/java/jdk" -# -# Environment Variables -# -if [ -z "${pidfilepath}" ]; then - pidfilepath="${DefaultPidFile}" -fi +default_java_home=/usr/lib/jvm/default-java +[ -e "$default_java_home" ] || default_java_home=/usr/local/java/jdk -if [ -z "${graphtitle}" ]; then - graphtitle="${pidfilepath}" -fi +pidfilepath=${pidfilepath:-/var/run/jsvc.pid} +graphtitle=${graphtitle:-$pidfilepath} +JAVA_HOME=${javahome:-$default_java_home} -if [ -z "${javahome}" ]; then - JAVA_HOME="${DefaultJavaHome}" -else - JAVA_HOME="${javahome}" -fi export JAVA_HOME -# -# Functions -# -chk_jdk() -{ - isJRockit=`${JAVA_HOME}/bin/java -version 2>&1 | egrep -i 'jrockit'` - if [ -n "${isJRockit}" ]; then - JDK_TYPE="bea" - else - JDK_TYPE="sun" + +get_jdk_type() { + local version + if "${JAVA_HOME}/bin/java" -version 2>&1 | grep -qi 'jrockit'; then + echo "bea" + else + echo "sun" fi } -chk_version() -{ - Version=`${JAVA_HOME}/bin/java -version 2>&1 | egrep '^java version' | awk '{print $3}' | sed -e 's/\"//g' | cut -d'_' -f 1` - if [ "${Version}" != "1.5.0" ]; then - return 1 - else - return 0 - fi + +print_config() { + echo "graph_title GC Count $graphtitle" + echo 'graph_args -l 0' + echo 'graph_vlabel GC Count(times)' + echo 'graph_total total' + echo 'graph_info GC Count' + echo 'graph_category virtualization' + + echo 'Young_GC.label Young_GC' + echo 'Young_GC.min 0' + if [ "${JDK_TYPE}" = "bea" ]; then + echo 'Old_GC.label Old_GC' + echo 'Old_GC.min 0' + else + echo 'Full_GC.label Full_GC' + echo 'Full_GC.min 0' + fi } -config_common() -{ - echo 'graph_title GC Count' $graphtitle - echo 'graph_args -l 0' - echo 'graph_vlabel GC Count(times)' - echo 'graph_total total' - echo 'graph_info GC Count' - echo 'graph_category virtualization' + +print_stats() { + local pid_num="$1" + local awk_script + if [ "${JDK_TYPE}" = "bea" ]; then + # shellcheck disable=SC2016 + awk_script='{ YC = $4; OC = $5; print "Young_GC.value " YGC; print "Old_GC.value " FGC; }' + else + # List & Order of columns of jstat changes with java versions + # idx["YGC"] is index of YGC column in output (i.e. 
13) + # $idx["YGC"] then accesses the value at this position (taken from 2nd line of the output) + # shellcheck disable=SC2016 + awk_script=' + NR==1 { + for (i=1;i<=NF;i++) idx[$i]=i + } + NR==2 { + print "Young_GC.value " $idx["YGC"]; + print "Full_GC.value " $idx["FGC"]; + }' + fi + "${JAVA_HOME}/bin/jstat" -gc "$pid_num" | awk "$awk_script" } -config_sun_jdk() -{ - config_common - - echo 'Young_GC.label Young_GC' - echo 'Young_GC.min 0' - echo 'Full_GC.label Full_GC' - echo 'Full_GC.min 0' - -} - -config_bea_jdk() -{ - config_common - - echo 'Young_GC.label Young_GC' - echo 'Young_GC.min 0' - echo 'Old_GC.label Old_GC' - echo 'Old_GC.min 0' -} - -print_sun_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - S0C = $1; \ - S1C = $2; \ - S0U = $3; \ - S1U = $4; \ - EC = $5; \ - EU = $6; \ - OC = $7; \ - OU = $8; \ - PC = $9; \ - PU = $10; \ - YGC = $11; \ - YGCT = $12; \ - FGC = $13; \ - FGCT = $14; \ - GCT = $15; \ - \ - S0F = S0C - S0U; \ - S1F = S1C - S1U; \ - EF = EC - EU; \ - OF = OC - OU; \ - PF = PC - PU; \ - \ - print "Young_GC.value " YGC; \ - print "Full_GC.value " FGC; \ -}' -} - -print_bea_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - HeapSize = $1; \ - NurserySize = $2; \ - UsedHeapSize = $3; \ - YC = $4; \ - OC = $5; \ - YCTime = $6; \ - OCTime = $7; \ - GCTime = $8; \ - YCPauseTime = $9; \ - OCPauseTime = $10; \ - PauseTime = $11; \ - Finalizers = $12; \ - \ - print "Young_GC.value " YC; \ - print "Old_GC.value " OC;\ -}' -} - -# -# common for all argument -# -chk_jdk # # autoconf @@ -175,44 +110,23 @@ if [ "$1" = "autoconf" ]; then if [ ! -x "${JAVA_HOME}/bin/jstat" ]; then echo "no (No jstat found in ${JAVA_HOME}/bin)" - exit 1 - fi - - chk_version - if [ $? != 0 ]; then - echo "no (Java version is invalid)" - exit 1 - fi - - if [ ! -f "${pidfilepath}" -o ! -r "${pidfilepath}" ]; then - echo "no (No such file ${pidfilepath} or cannot read ${pidfilepath}" - exit 1 - fi - - echo "yes" - exit 0 -fi - - -# -# config -# -if [ "$1" = "config" ]; then - if [ "${JDK_TYPE}" == "bea" ]; then - config_bea_jdk + elif [ ! -f "$pidfilepath" ]; then + echo "no (missing file $pidfilepath)" + elif [ ! -r "$pidfilepath" ]; then + echo "no (cannot read $pidfilepath)" else - config_sun_jdk + echo "yes" fi exit 0 fi -# -# Main -# -PidNum=`cat ${pidfilepath}` -if [ "${JDK_TYPE}" == "bea" ]; then - print_bea_stats -else - print_sun_stats +JDK_TYPE=$(get_jdk_type) + + +if [ "$1" = "config" ]; then + print_config + exit 0 fi + +print_stats "$(cat "$pidfilepath")" diff --git a/plugins/jvm/jstat__gctime b/plugins/jvm/jstat__gctime index d3f8e8ff..af5cd455 100755 --- a/plugins/jvm/jstat__gctime +++ b/plugins/jvm/jstat__gctime @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # # Plugin for monitor JVM activity - GC Time - # @@ -22,9 +22,11 @@ # # Target: # -# Target Java Virtual Machine to monitor are: -# Sun JDK 5.0 (http://java.sun.com/javase/) (default) -# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) +# Target Java Virtual Machine to monitor are: +# Sun JDK 5.0 (http://java.sun.com/javase/) +# Sun JDK 8.0 (http://java.sun.com/javase/) +# OpenJDK 1.7 .. 11 (https://openjdk.java.net/) +# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) # # Parameters: # @@ -33,148 +35,85 @@ # Config variables: # # pidfilepath - Which file path use. 
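The awk helper in the rewritten jstat__gccount above builds a name-to-column map from the header line because the column order of `jstat -gc` output differs between Java versions; `idx["YGC"]` is that map. The same idea in a short Python sketch, run against an invented two-line jstat sample (real outputs carry more columns):

    # Invented `jstat -gc <pid>` sample; the real column set varies by JDK version.
    OUTPUT = """
     S0C    S1C    S0U    S1U    EC       EU       OC       OU       YGC   YGCT   FGC   FGCT   GCT
    512.0  512.0    0.0  256.0  32768.0  12000.0  87552.0  41000.0    15  0.210     2  0.480  0.690
    """

    header, values = (line.split() for line in OUTPUT.strip().splitlines())
    idx = {name: i for i, name in enumerate(header)}   # column name -> position

    # The lookups the awk scripts express as $idx["YGC"] and $idx["FGC"].
    print("Young_GC.value", values[idx["YGC"]])
    print("Full_GC.value", values[idx["FGC"]])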
Defaults to '/var/run/jsvc.pid' -# javahome - Defaults to '/usr/local/java/jdk' +# javahome - override automatic detection of JRE directory +# graphtitle - Title of the graph (defaults to PID file location) # -DefaultPidFile="/var/run/jsvc.pid" -DefaultJavaHome="/usr/local/java/jdk" -# -# Environment Variables -# -if [ -z "${pidfilepath}" ]; then - pidfilepath="${DefaultPidFile}" -fi +default_java_home=/usr/lib/jvm/default-java +[ -e "$default_java_home" ] || default_java_home=/usr/local/java/jdk -if [ -z "${graphtitle}" ]; then - graphtitle="${pidfilepath}" -fi +pidfilepath=${pidfilepath:-/var/run/jsvc.pid} +graphtitle=${graphtitle:-$pidfilepath} +JAVA_HOME=${javahome:-$default_java_home} -if [ -z "${javahome}" ]; then - JAVA_HOME="${DefaultJavaHome}" -else - JAVA_HOME="${javahome}" -fi export JAVA_HOME -# -# Functions -# -chk_jdk() -{ - isJRockit=`${JAVA_HOME}/bin/java -version 2>&1 | egrep -i 'jrockit'` - if [ -n "${isJRockit}" ]; then - JDK_TYPE="bea" - else - JDK_TYPE="sun" + +get_jdk_type() { + local version + if "${JAVA_HOME}/bin/java" -version 2>&1 | grep -qi 'jrockit'; then + echo "bea" + else + echo "sun" fi } -chk_version() -{ - Version=`${JAVA_HOME}/bin/java -version 2>&1 | egrep '^java version' | awk '{print $3}' | sed -e 's/\"//g' | cut -d'_' -f 1` - if [ "${Version}" != "1.5.0" ]; then - return 1 - else - return 0 - fi + +print_config() { + echo "graph_title GC Time $graphtitle" + echo 'graph_args -l 0' + echo 'graph_vlabel GC Time(sec)' + echo 'graph_total total' + echo 'graph_info GC Time' + echo 'graph_category virtualization' + + echo 'Young_GC.label Young_GC' + echo 'Young_GC.min 0' + if [ "${JDK_TYPE}" = "bea" ]; then + echo 'Old_GC.label Old_GC' + echo 'Old_GC.min 0' + echo 'Young_Pause.label Young_GC Pause' + echo 'Young_Pause.min 0' + echo 'Old_Pause.label Old_GC Pause' + echo 'Old_Pause.min 0' + else + echo 'Full_GC.label Full_GC' + echo 'Full_GC.min 0' + fi } -config_common() -{ - echo 'graph_title GC Time' $graphtitle - echo 'graph_args -l 0' - echo 'graph_vlabel GC Time(sec)' - echo 'graph_total total' - echo 'graph_info GC Time' - echo 'graph_category virtualization' + +print_stats() { + local pid_num="$1" + local awk_script + if [ "${JDK_TYPE}" = "bea" ]; then + # shellcheck disable=SC2016 + awk_script='{ + YCTime = $6; + OCTime = $7; + YCPauseTime = $9; + OCPauseTime = $10; + print "Young_GC.value " YCTime; + print "Old_GC.value " OCTime; + print "Young_Pause.value " YCPauseTime; + print "Old_Pause.value " OCPauseTime; }' + else + # List & Order of columns of jstat changes with java versions + # idx["YGC"] is index of YGC column in output (i.e. 
13) + # $idx["YGC"] then accesses the value at this position (taken from 2nd line of the output) + # shellcheck disable=SC2016 + awk_script=' + (NR==1) { + for (i=1;i<=NF;i++) idx[$i]=i + } + (NR==2) { + print "Young_GC.value " $idx["YGCT"]; + print "Full_GC.value " $idx["FGCT"]; + }' + fi + "${JAVA_HOME}/bin/jstat" -gc "$pid_num" | awk "$awk_script" } -config_sun_jdk() -{ - config_common - - echo 'Young_GC.label Young_GC' - echo 'Young_GC.min 0' - echo 'Full_GC.label Full_GC' - echo 'Full_GC.min 0' - -} - -config_bea_jdk() -{ - config_common - - echo 'Young_GC.label Young_GC' - echo 'Young_GC.min 0' - echo 'Old_GC.label Old_GC' - echo 'Old_GC.min 0' - echo 'Young_Pause.label Young_GC Pause' - echo 'Young_Pause.min 0' - echo 'Old_Pause.label Old_GC Pause' - echo 'Old_Pause.min 0' - -} - -print_sun_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - S0C = $1; \ - S1C = $2; \ - S0U = $3; \ - S1U = $4; \ - EC = $5; \ - EU = $6; \ - OC = $7; \ - OU = $8; - PC = $9; \ - PU = $10; \ - YGC = $11; \ - YGCT = $12; \ - FGC = $13; \ - FGCT = $14; \ - GCT = $15; \ - - \ - S0F = S0C - S0U; \ - S1F = S1C - S1U; \ - EF = EC - EU; \ - OF = OC - OU; \ - PF = PC - PU; \ - \ - print "Young_GC.value " YGCT; \ - print "Full_GC.value " FGCT; \ -}' -} - -print_bea_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - HeapSize = $1; \ - NurserySize = $2; \ - UsedHeapSize = $3; \ - YC = $4; \ - OC = $5; \ - YCTime = $6; \ - OCTime = $7; \ - GCTime = $8; \ - YCPauseTime = $9; \ - OCPauseTime = $10; \ - PauseTime = $11; \ - Finalizers = $12; \ - \ - print "Young_GC.value " YCTime; \ - print "Old_GC.value " OCTime; \ - print "Young_Pause.value " YCPauseTime; \ - print "Old_Pause.value " OCPauseTime -}' -} - -# -# common for all argument -# -chk_jdk # # autoconf @@ -183,44 +122,22 @@ if [ "$1" = "autoconf" ]; then if [ ! -x "${JAVA_HOME}/bin/jstat" ]; then echo "no (No jstat found in ${JAVA_HOME}/bin)" - exit 1 - fi - - chk_version - if [ $? != 0 ]; then - echo "no (Java version is invalid)" - exit 1 - fi - - if [ ! -f "${pidfilepath}" -o ! -r "${pidfilepath}" ]; then - echo "no (No such file ${pidfilepath} or cannot read ${pidfilepath}" - exit 1 - fi - - echo "yes" - exit 0 -fi - - -# -# config -# -if [ "$1" = "config" ]; then - if [ "${JDK_TYPE}" == "bea" ]; then - config_bea_jdk + elif [ ! -f "$pidfilepath" ]; then + echo "no (missing file $pidfilepath)" + elif [ ! -r "$pidfilepath" ]; then + echo "no (cannot read $pidfilepath)" else - config_sun_jdk + echo "yes" fi exit 0 fi -# -# Main -# -PidNum=`cat ${pidfilepath}` -if [ "${JDK_TYPE}" == "bea" ]; then - print_bea_stats -else - print_sun_stats +JDK_TYPE=$(get_jdk_type) + + +if [ "$1" = "config" ]; then + print_config fi + +print_stats "$(cat "$pidfilepath")" diff --git a/plugins/jvm/jstat__heap b/plugins/jvm/jstat__heap index de830218..e89d434b 100755 --- a/plugins/jvm/jstat__heap +++ b/plugins/jvm/jstat__heap @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # # Plugin for monitor JVM activity - Heap Usage - # @@ -22,9 +22,11 @@ # # Target: # -# Target Java Virtual Machine to monitor are: -# Sun JDK 5.0 (http://java.sun.com/javase/) (default) -# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) +# Target Java Virtual Machine to monitor are: +# Sun JDK 5.0 (http://java.sun.com/javase/) +# Sun JDK 8.0 (http://java.sun.com/javase/) +# OpenJDK 1.7 .. 11 (https://openjdk.java.net/) +# BEA JRockit 5.0 (http://dev2dev.bea.com/jrockit/) # # Parameters: # @@ -33,160 +35,116 @@ # Config variables: # # pidfilepath - Which file path use. 
Defaults to '/var/run/jsvc.pid' -# javahome - Defaults to '/usr/local/java/jdk' +# javahome - override automatic detection of JRE directory +# graphtitle - Title of the graph (defaults to PID file location) # -DefaultPidFile="/var/run/jsvc.pid" -DefaultJavaHome="/usr/local/java/jdk" -# -# Environment Variables -# -if [ -z "${pidfilepath}" ]; then - pidfilepath="${DefaultPidFile}" -fi +default_java_home=/usr/lib/jvm/default-java +[ -e "$default_java_home" ] || default_java_home=/usr/local/java/jdk -if [ -z "${graphtitle}" ]; then - graphtitle="${pidfilepath}" -fi +pidfilepath=${pidfilepath:-/var/run/jsvc.pid} +graphtitle=${graphtitle:-$pidfilepath} +JAVA_HOME=${javahome:-$default_java_home} -if [ -z "${javahome}" ]; then - JAVA_HOME="${DefaultJavaHome}" -else - JAVA_HOME="${javahome}" -fi export JAVA_HOME -# -# Functions -# -chk_jdk() -{ - isJRockit=`${JAVA_HOME}/bin/java -version 2>&1 | egrep -i 'jrockit'` - if [ -n "${isJRockit}" ]; then - JDK_TYPE="bea" - else - JDK_TYPE="sun" + +get_jdk_type() { + local version + if "${JAVA_HOME}/bin/java" -version 2>&1 | grep -qi 'jrockit'; then + echo "bea" + else + echo "sun" fi } -chk_version() -{ - Version=`${JAVA_HOME}/bin/java -version 2>&1 | egrep '^java version' | awk '{print $3}' | sed -e 's/\"//g' | cut -d'_' -f 1` - if [ "${Version}" != "1.5.0" ]; then - return 1 - else - return 0 - fi + +print_config() { + echo "graph_title Heap Usage $graphtitle" + echo "graph_args --base 1024 -l 0" + echo "graph_vlabel Heap Usage(Bytes)" + echo "graph_info Heap Usage" + echo "graph_category virtualization" + + if [ "${JDK_TYPE}" = "bea" ]; then + echo "NurserySize.label NurserySize" + echo "HeapSize.label HeapSize" + echo "UsedHeapSize.label UsedHeapSize" + echo "NurserySize.draw AREA" + echo "HeapSize.draw STACK" + echo "UsedHeapSize.draw STACK" + else + echo "Eden_Used.label Eden_Used" + echo "Eden_Free.label Eden_Free" + echo "Survivor0_Used.label Survivor0_Used" + echo "Survivor0_Free.label Survivor0_Free" + echo "Survivor1_Used.label Survivor1_Used" + echo "Survivor1_Free.label Survivor1_Free" + echo "Old_Used.label Old_Used" + echo "Old_Free.label Old_Free" + echo "Permanent_Used.label Permanent_Used" + echo "Permanent_Free.label Permanent_Free" + echo "Eden_Used.draw AREA" + echo "Eden_Free.draw STACK" + echo "Survivor0_Used.draw STACK" + echo "Survivor0_Free.draw STACK" + echo "Survivor1_Used.draw STACK" + echo "Survivor1_Free.draw STACK" + echo "Old_Used.draw STACK" + echo "Old_Free.draw STACK" + echo "Permanent_Used.draw STACK" + echo "Permanent_Free.draw STACK" + fi } -config_common() -{ - echo "graph_title Heap Usage" $graphtitle - echo "graph_args --base 1024 -l 0" - echo "graph_vlabel Heap Usage(Bytes)" - echo "graph_info Heap Usage" - echo "graph_category virtualization" + +print_stats() { + local pid_num="$1" + local awk_script + if [ "${JDK_TYPE}" = "bea" ]; then + # shellcheck disable=SC2016 + awk_script='{ + HeapSize = $1; + NurserySize = $2; + UsedHeapSize = $3; + print "NurserySize.value " NurserySize * 1024; + print "HeapSize.value " HeapSize * 1024; + print "UsedHeapSize.value " UsedHeapSize * 1024; }' + else + # List & Order of columns of jstat changes with java versions + # idx["YGC"] is index of YGC column in output (i.e. 
13) + # $idx["YGC"] then accesses the value at this position (taken from 2nd line of the output) + # shellcheck disable=SC2016 + awk_script=' + NR==1 { + for (i=1;i<=NF;i++) idx[$i]=i + } + NR==2 { + S0F = $idx["S0C"] - $idx["S0U"]; + S1F = $idx["S1C"] - $idx["S1U"]; + EF = $idx["EC"] - $idx["EU"]; + OF = $idx["OC"] - $idx["OU"]; + # Java <8 has Permanent Generation (PU,PC columns), while >=8 has/names it Metaspace (MU,MC) + if (idx["MU"] == "") { + idx["MU"] = idx["PU"]; + idx["MC"] = idx["PC"]; + } + MF = $idx["MC"] - $idx["MU"]; + print "Eden_Used.value " $idx["EU"] * 1024; + print "Eden_Free.value " EF * 1024; + print "Survivor0_Used.value " $idx["S0U"] * 1024; + print "Survivor0_Free.value " S0F * 1024; + print "Survivor1_Used.value " $idx["S1U"] * 1024; + print "Survivor1_Free.value " S1F * 1024; + print "Old_Used.value " $idx["OU"] * 1024; + print "Old_Free.value " OF * 1024; + print "Permanent_Used.value " $idx["MU"] * 1024; + print "Permanent_Free.value " MF * 1024; + }' + fi + "${JAVA_HOME}/bin/jstat" -gc "$pid_num" | awk "$awk_script" } -config_sun_jdk() -{ - config_common - - echo "Eden_Used.label Eden_Used" - echo "Eden_Free.label Eden_Free" - echo "Survivor0_Used.label Survivor0_Used" - echo "Survivor0_Free.label Survivor0_Free" - echo "Survivor1_Used.label Survivor1_Used" - echo "Survivor1_Free.label Survivor1_Free" - echo "Old_Used.label Old_Used" - echo "Old_Free.label Old_Free" - echo "Permanent_Used.label Permanent_Used" - echo "Permanent_Free.label Permanent_Free" - echo "Eden_Used.draw AREA" - echo "Eden_Free.draw STACK" - echo "Survivor0_Used.draw STACK" - echo "Survivor0_Free.draw STACK" - echo "Survivor1_Used.draw STACK" - echo "Survivor1_Free.draw STACK" - echo "Old_Used.draw STACK" - echo "Old_Free.draw STACK" - echo "Permanent_Used.draw STACK" - echo "Permanent_Free.draw STACK" -} - -config_bea_jdk() -{ - config_common - - echo "NurserySize.label NurserySize" - echo "HeapSize.label HeapSize" - echo "UsedHeapSize.label UsedHeapSize" - echo "NurserySize.draw AREA" - echo "HeapSize.draw STACK" - echo "UsedHeapSize.draw STACK" -} - -print_sun_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - S0C = $1; \ - S1C = $2; \ - S0U = $3; \ - S1U = $4; \ - EC = $5; \ - EU = $6; \ - OC = $7; \ - OU = $8; - PC = $9; \ - PU = $10; \ - \ - S0F = S0C - S0U; \ - S1F = S1C - S1U; \ - EF = EC - EU; \ - OF = OC - OU; \ - PF = PC - PU; \ - \ - print "Eden_Used.value " EU * 1024; \ - print "Eden_Free.value " EF * 1024; \ - print "Survivor0_Used.value " S0U * 1024; \ - print "Survivor0_Free.value " S0F * 1024; \ - print "Survivor1_Used.value " S1U * 1024; \ - print "Survivor1_Free.value " S1F * 1024; \ - print "Old_Used.value " OU * 1024; \ - print "Old_Free.value " OF * 1024; \ - print "Permanent_Used.value " PU * 1024; \ - print "Permanent_Free.value " PF * 1024; \ -}' -} - -print_bea_stats() -{ -${JAVA_HOME}/bin/jstat -gc ${PidNum} | tail -1 | awk \ -'{\ - HeapSize = $1; \ - NurserySize = $2; \ - UsedHeapSize = $3; \ - YC = $4; \ - OC = $5; \ - YCTime = $6; \ - OCTime = $7; \ - GCTime = $8; \ - YCPauseTime = $9; \ - OCPauseTime = $10; \ - PauseTime = $11; \ - Finalizers = $12; \ - \ - print "NurserySize.value " NurserySize * 1024; \ - print "HeapSize.value " UsedHeapSize * 1024; \ - print "UsedHeapSize.value " UsedHeapSize * 1024; \ -}' -} - -# -# common for all argument -# -chk_jdk # # autoconf @@ -195,44 +153,22 @@ if [ "$1" = "autoconf" ]; then if [ ! 
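The heap variant above additionally copes with Java 8 renaming the permanent generation columns (PC/PU) to Metaspace (MC/MU), and derives every "Free" value as capacity minus used. That fallback, reduced to a Python sketch over an invented column map and row (values in KiB, as jstat reports them):

    # Invented stand-ins for one `jstat -gc` sample parsed with the column map above.
    idx = {"EC": 0, "EU": 1, "OC": 2, "OU": 3, "PC": 4, "PU": 5}   # pre-Java-8: PermGen columns
    row = [32768.0, 12000.0, 87552.0, 41000.0, 21248.0, 20500.0]

    # Java >= 8 reports Metaspace (MC/MU) instead of PermGen (PC/PU); fall back when absent.
    mc = idx.get("MC", idx["PC"])
    mu = idx.get("MU", idx["PU"])

    print("Old_Used.value", int(row[idx["OU"]] * 1024))                      # KiB -> bytes
    print("Old_Free.value", int((row[idx["OC"]] - row[idx["OU"]]) * 1024))   # free = capacity - used
    print("Permanent_Used.value", int(row[mu] * 1024))
    print("Permanent_Free.value", int((row[mc] - row[mu]) * 1024))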
-x "${JAVA_HOME}/bin/jstat" ]; then echo "no (No jstat found in ${JAVA_HOME}/bin)" - exit 1 - fi - - chk_version - if [ $? != 0 ]; then - echo "no (Java version is invalid)" - exit 1 - fi - - if [ ! -f "${pidfilepath}" -o ! -r "${pidfilepath}" ]; then - echo "no (No such file ${pidfilepath} or cannot read ${pidfilepath}" - exit 1 - fi - - echo "yes" - exit 0 -fi - - -# -# config -# -if [ "$1" = "config" ]; then - if [ "${JDK_TYPE}" == "bea" ]; then - config_bea_jdk + elif [ ! -f "$pidfilepath" ]; then + echo "no (missing file $pidfilepath)" + elif [ ! -r "$pidfilepath" ]; then + echo "no (cannot read $pidfilepath)" else - config_sun_jdk + echo "yes" fi exit 0 fi -# -# Main -# -PidNum=`cat ${pidfilepath}` -if [ "${JDK_TYPE}" == "bea" ]; then - print_bea_stats -else - print_sun_stats +JDK_TYPE=$(get_jdk_type) + + +if [ "$1" = "config" ]; then + print_config fi + +print_stats "$(cat "$pidfilepath")" diff --git a/plugins/jvm/jvm_sun_memory b/plugins/jvm/jvm_sun_memory index c622e1dc..290c03a0 100755 --- a/plugins/jvm/jvm_sun_memory +++ b/plugins/jvm/jvm_sun_memory @@ -19,12 +19,12 @@ # env.graphtitle (default: "Sun Java") # You need to configure your Sun JVM with these options: -# -verbose:gc +# -verbose:gc # -Xloggc:/var/log/app/jvm/gc.log # -XX:+PrintGCTimeStamps # -XX:+PrintGCDetails -# History: +# History: # This plugin was developed by various people over some time - no logs # of this has been found. - In 2006 significant contributions was @@ -44,10 +44,10 @@ my $grtitle = $ENV{graphtitle} || "Sun Java"; # Title that appears on munin graph my $title = "$grtitle memory usage"; # Extended information that appears below munin graph -my $info = "Write som info about this graph..."; +my $info = "Write some info about this graph..."; sub analyze_record { - # Match all interesting elements of a record and insert them + # Match all interesting elements of a record and insert them # into a hash my $record = shift; @@ -145,7 +145,7 @@ while () { ($now,undef) = split(/:/,$_,2); $now = $lastnow unless $now; } - + if (/.+Tenured.+/) { $record = $_; } elsif (/^:.+/) { diff --git a/plugins/jvm/jvm_sun_minorgcs b/plugins/jvm/jvm_sun_minorgcs index caa99a9a..23509a51 100755 --- a/plugins/jvm/jvm_sun_minorgcs +++ b/plugins/jvm/jvm_sun_minorgcs @@ -19,12 +19,12 @@ # env.grname (default: "sun-jvm". Used for state file-name) # You need to configure your Sun JVM with these options: -# -verbose:gc +# -verbose:gc # -Xloggc:/var/log/app/jvm/gc.log # -XX:+PrintGCTimeStamps # -XX:+PrintGCDetails -# History: +# History: # This plugin was developed by various people over some time - no logs # of this has been found. - In 2006 significant contributions was @@ -94,7 +94,7 @@ open (OUT, ">$statefile") or die "Could not open $statefile for reading: $!\n"; print OUT "$pos:$count:$timespent\n"; close OUT; -sub parseFile { +sub parseFile { my ($fname, $start, $count, $timespent) = @_; my @secs; diff --git a/plugins/jvm/jvm_sun_tenuredgcs b/plugins/jvm/jvm_sun_tenuredgcs index 9ea42e71..78487e06 100755 --- a/plugins/jvm/jvm_sun_tenuredgcs +++ b/plugins/jvm/jvm_sun_tenuredgcs @@ -17,12 +17,12 @@ # env.graphtitle (default: "Sun Java") # You need to configure your Sun JVM with these options: -# -verbose:gc +# -verbose:gc # -Xloggc:/var/log/app/jvm/gc.log # -XX:+PrintGCTimeStamps # -XX:+PrintGCDetails -# History: +# History: # This plugin was developed by various people over some time - no logs # of this has been found. 
- In 2006 significant contributions was diff --git a/plugins/kamailio/kamailio_memory b/plugins/kamailio/kamailio_memory index fa256a3c..83a3366a 100755 --- a/plugins/kamailio/kamailio_memory +++ b/plugins/kamailio/kamailio_memory @@ -31,9 +31,9 @@ GPLv2 use strict; -my %WANTED = ( "kamailio" => "ram_total", +my %WANTED = ( "kamailio" => "ram_total", "rtpproxy" => "ram_rtpproxy", - "freeswitch" => "ram_freeswitch", + "freeswitch" => "ram_freeswitch", ); my %VALUE = ( "ram_total" => 0, diff --git a/plugins/kamailio/kamailio_mysql_shared_memory b/plugins/kamailio/kamailio_mysql_shared_memory index d7405dfa..d163c947 100755 --- a/plugins/kamailio/kamailio_mysql_shared_memory +++ b/plugins/kamailio/kamailio_mysql_shared_memory @@ -93,9 +93,9 @@ use strict; my $MYSQLADMIN = $ENV{mysql} || "mysql"; my $COMMAND = "$MYSQLADMIN $ENV{mysqlauth} $ENV{kamailiodb} -e 'select * from statistics order by id desc limit 1\\G'"; -my %WANTED = ( "shm_free_used_size" => "shmem_total", +my %WANTED = ( "shm_free_used_size" => "shmem_total", "shm_real_used_size" => "shmem_real_used", - "shm_used_size" => "shmem_used", + "shm_used_size" => "shmem_used", ); my %VALUE = ( "shmem_total" => 0, diff --git a/plugins/kamailio/kamailio_transactions_users b/plugins/kamailio/kamailio_transactions_users index f7f68220..9af486ec 100755 --- a/plugins/kamailio/kamailio_transactions_users +++ b/plugins/kamailio/kamailio_transactions_users @@ -94,8 +94,8 @@ my $MYSQL = $ENV{mysql} || "mysql"; my $COMMAND = "$MYSQL $ENV{mysqlauth} $ENV{kamailiodb} -e 'select * from statistics order by id desc limit 1\\G'"; my %WANTED = ( "ul_users" => "users", - "ul_contact" => "contacts", - "tm_active" => "transactions", + "ul_contact" => "contacts", + "tm_active" => "transactions", ); my %VALUE = ( "users" => 0, diff --git a/plugins/keystone/keystone_stats b/plugins/keystone/keystone_stats index bed779c8..ec0e596f 100755 --- a/plugins/keystone/keystone_stats +++ b/plugins/keystone/keystone_stats @@ -30,7 +30,7 @@ def print_config(): print 'graph_title Keystone Stats' print 'graph_vlabel count' print 'graph_args --base 1000 --lower-limit 0' - print 'graph_category other' + print 'graph_category auth' print 'graph_scale no' print 'graph_info This graph shows stats about keystone: ' + (', ').join(stats) for field in stats: @@ -55,7 +55,7 @@ def get_status(): total['users'] += 1 if user['enabled']: enabled['users'] += 1 - + # Ldap and pam driver don't support get_all_tenants() # kvs and sql implement get_tenants() instead of get_all_tenants() # Whoo: None of backend implements the correct function @@ -89,7 +89,7 @@ if __name__ == '__main__': if sys.argv[1] == "config": print_config() elif sys.argv[1] == "autoconf": - if not successful_import: + if not successful_import: print 'no (failed import keystone module)' sys.exit(0) try: diff --git a/plugins/libvirt/kvm_cpu b/plugins/libvirt/kvm_cpu index d8ec7934..4eebbd81 100755 --- a/plugins/libvirt/kvm_cpu +++ b/plugins/libvirt/kvm_cpu @@ -25,7 +25,7 @@ def config(vm_names): base_config = """graph_title KVM Virtual Machine CPU usage graph_vlabel %% -graph_category Virtualization +graph_category virtualization graph_scale no graph_period second graph_info This graph shows the current CPU used by virtual machines @@ -52,6 +52,14 @@ def clean_vm_name(vm_name): if suffix: vm_name = re.sub(suffix,'',vm_name) + # proxmox uses kvm with -name parameter + parts = vm_name.split('\x00') + if (parts[0].endswith('kvm')): + try: + return parts[parts.index('-name')+1] + except ValueError: + pass + return 
re.sub(r"[^a-zA-Z0-9_]", "_", vm_name) def detect_kvm(): @@ -63,29 +71,29 @@ def detect_kvm(): def find_vm_names(pids): '''Find and clean vm names from pids - @return a dictionnary of {pids : cleaned vm name} + @return a dictionary of {pids : cleaned vm name} ''' result = {} for pid in pids: cmdline = open("/proc/%s/cmdline" % pid, "r") - result[pid] = clean_vm_name(re.sub(r"^.*-name\x00([a-zA-Z0-9.-_-]*)\x00\-.*$",r"\1", cmdline.readline())) + result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline())) return result - + def list_pids(): ''' Find the pid of kvm processes @return a list of pids from running kvm ''' - pid = Popen("pidof qemu-system-x86_64", shell=True, stdout=PIPE) + pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE) return pid.communicate()[0].split() def fetch(vms): ''' Fetch values for a list of pids - @param dictionnary {kvm_pid: cleaned vm name} + @param dictionary {kvm_pid: cleaned vm name} ''' for ( pid, name ) in vms.iteritems(): ( user, system ) = open("/proc/%s/stat" % pid, 'r').readline().split(' ')[13:15] print '%s_cpu.value %d' % ( name, int(user) + int(system) ) - + if __name__ == "__main__": if len(sys.argv) > 1: if sys.argv[1] in ['autoconf', 'detect']: diff --git a/plugins/libvirt/kvm_io b/plugins/libvirt/kvm_io index 065f1a19..1e39126c 100755 --- a/plugins/libvirt/kvm_io +++ b/plugins/libvirt/kvm_io @@ -23,7 +23,7 @@ def config(vm_names): ''' base_config = """graph_title KVM Virtual Machine IO usage graph_vlabel Bytes read(-)/written(+) per second -graph_category Virtualization +graph_category virtualization graph_info This graph shows the block device I/O used of virtual machines graph_args --base 1024 """ @@ -33,12 +33,11 @@ graph_args --base 1024 print "%s_read.label %s" % (vm, vm) print "%s_read.type COUNTER" % vm print "%s_read.min 0" % vm - print "%s_read.draw LINE1" % vm print "%s_read.info I/O used by virtual machine %s" % (vm, vm) + print "%s_read.graph no" % vm print "%s_write.label %s" % (vm, vm) print "%s_write.type COUNTER" % vm - print "%s_write.min 0" % vm - print "%s_write.draw LINE1" % vm + print "%s_write.min 0" % vm print "%s_write.negative %s_read" % (vm, vm) print "%s_write.info I/O used by virtual machine %s" % (vm, vm) @@ -51,12 +50,18 @@ def clean_vm_name(vm_name): suffix = os.getenv('vmsuffix') if suffix: vm_name = re.sub(suffix,'',vm_name) - + # proxmox uses kvm with -name parameter + parts = vm_name.split('\x00') + if (parts[0].endswith('kvm')): + try: + return parts[parts.index('-name')+1] + except ValueError: + pass return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name) - + def fetch(vms): ''' Fetch values for a list of pids - @param dictionnary {kvm_pid: cleaned vm name} + @param dictionary {kvm_pid: cleaned vm name} ''' res = {} for pid in vms: @@ -64,11 +69,11 @@ def fetch(vms): for line in f.readlines(): if "read_bytes" in line: read = line.split()[1] - print "%s_read.value %s" % (vms[pid], read) + print "%s_read.value %s" % (vms[pid], read) if "write_bytes" in line: write = line.split()[1] - print "%s_write.value %s" % (vms[pid], write) - break + print "%s_write.value %s" % (vms[pid], write) + break f.close() def detect_kvm(): @@ -80,21 +85,21 @@ def detect_kvm(): def find_vm_names(pids): '''Find and clean vm names from pids - @return a dictionnary of {pids : cleaned vm name} + @return a dictionary of {pids : cleaned vm name} ''' result = {} for pid in pids: cmdline = open("/proc/%s/cmdline" % pid, "r") - result[pid] = 
clean_vm_name(re.sub(r"^.*-name\x00([a-zA-Z0-9.-_-]*)\x00\-.*$",r"\1", cmdline.readline())) + result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline())) return result - + def list_pids(): ''' Find the pid of kvm processes @return a list of pids from running kvm ''' - pid = Popen("pidof qemu-system-x86_64", shell=True, stdout=PIPE) + pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE) return pid.communicate()[0].split() - + if __name__ == "__main__": if len(sys.argv) > 1: if sys.argv[1] in ['autoconf', 'detect']: diff --git a/plugins/libvirt/kvm_mem b/plugins/libvirt/kvm_mem index 66e7f399..eaa86fda 100755 --- a/plugins/libvirt/kvm_mem +++ b/plugins/libvirt/kvm_mem @@ -23,7 +23,7 @@ def config(vm_names): ''' base_config = """graph_title KVM Virtual Machine Memory usage graph_vlabel Bytes -graph_category Virtualization +graph_category virtualization graph_info This graph shows the current amount of memory used by virtual machines graph_args --base 1024 """ @@ -49,11 +49,19 @@ def clean_vm_name(vm_name): if suffix: vm_name = re.sub(suffix,'',vm_name) + # proxmox uses kvm with -name parameter + parts = vm_name.split('\x00') + if (parts[0].endswith('kvm')): + try: + return parts[parts.index('-name')+1] + except ValueError: + pass + return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name) - + def fetch(vms): ''' Fetch values for a list of pids - @param dictionnary {kvm_pid: cleaned vm name} + @param dictionary {kvm_pid: cleaned vm name} ''' res = {} for pid in vms: @@ -61,12 +69,12 @@ def fetch(vms): cmdline = open("/proc/%s/cmdline" % pid, "r") amount = re.sub(r"^.*-m\x00(.*)\x00-smp.*$",r"\1", cmdline.readline()) amount = int(amount) * 1024 * 1024 - print "%s_mem.value %s" % (vms[pid], amount) + print "%s_mem.value %s" % (vms[pid], amount) except: cmdline = open("/proc/%s/cmdline" % pid, "r") amount = re.sub(r"^.*-m\x00(\d+).*$",r"\1", cmdline.readline()) amount = int(amount) * 1024 * 1024 - print "%s_mem.value %s" % (vms[pid], amount) + print "%s_mem.value %s" % (vms[pid], amount) def detect_kvm(): ''' Check if kvm is installed @@ -77,21 +85,21 @@ def detect_kvm(): def find_vm_names(pids): '''Find and clean vm names from pids - @return a dictionnary of {pids : cleaned vm name} + @return a dictionary of {pids : cleaned vm name} ''' result = {} for pid in pids: cmdline = open("/proc/%s/cmdline" % pid, "r") - result[pid] = clean_vm_name(re.sub(r"^.*-name\x00([a-zA-Z0-9.-_-]*)\x00\-.*$",r"\1", cmdline.readline())) + result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline())) return result - + def list_pids(): ''' Find the pid of kvm processes @return a list of pids from running kvm ''' - pid = Popen("pidof qemu-system-x86_64", shell=True, stdout=PIPE) + pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE) return pid.communicate()[0].split() - + if __name__ == "__main__": if len(sys.argv) > 1: if sys.argv[1] in ['autoconf', 'detect']: diff --git a/plugins/libvirt/kvm_net b/plugins/libvirt/kvm_net index dfe2cd79..381bd511 100755 --- a/plugins/libvirt/kvm_net +++ b/plugins/libvirt/kvm_net @@ -1,141 +1,240 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# vim: set fileencoding=utf-8 -# -# Munin plugin to show the network I/O per vm -# -# Copyright Igor Borodikhin -# -# License : GPLv3 -# -# -# parsed environment variables: -# vmsuffix: part of vm name to be removed -# -#%# capabilities=autoconf -#%# family=contrib +#!/usr/bin/python3 +""" -import re, os, sys +=head1 NAME + +kvm_net - 
Munin plugin to show the network I/O per VM + + +=head1 APPLICABLE SYSTEMS + +Virtualization server with VMs based on KVM may be able to track the network +traffic of their VMs, if the KVM processes are started in a specific way. + +Probably proxmox-based virtualization hosts fit into this category. + +You can easily check if your KVM processes are started in the expected way, by +running the following command: + + ps -ef | grep "netdev.*ifname=" + +The plugin can be used, if the above command outputs one line for every +currently running VM. + +In all other cases you need to use other munin plugins instead, e.g. "libvirt". + + +=head1 CONFIGURATION + +parsed environment variables: + + * vmsuffix: part of vm name to be removed + + +=head1 AUTHOR + +Copyright (C) 2012 - Igor Borodikhin +Copyright (C) 2018 - Lars Kruse + + +=head1 LICENSE + +GPLv3 + + +=head1 MAGIC MARKERS + + #%# capabilities=autoconf + #%# family=contrib + +=cut +""" + +import os +import re from subprocess import Popen, PIPE +import sys + + +VM_NAME_REGEX = re.compile("^.*\x00-{arg_name}\x00(.+)\x00.*$") +KVM_INTERFACE_NAME_REGEX = re.compile("(?:^|,)ifname=([^,]+)(?:,|$)") + def config(vm_names): - ''' Print the plugin's config + """ Print the plugin's config + @param vm_names : a list of "cleaned" vms' name - ''' - base_config = """graph_title KVM Network I/O -graph_vlabel Bytes rx(-)/tx(+) per second -graph_category Virtualization -graph_info This graph shows the network I/O of the virtual machines -graph_args --base 1024 """ - print base_config + print("graph_title KVM Network I/O") + print("graph_vlabel Bytes rx(-)/tx(+) per second") + print("graph_category virtualization") + print("graph_args --base 1024") + print("graph_info This graph shows the network I/O of the virtual " + "machines. It is only usable for VMs that were started in a very " + "specific way. If you see no values in the diagrams, then you " + "should check, if the command \"ps -ef | grep 'netdev.*ifname='\" " + "returns one line of output for every running VM. If there is no " + "output, then you need to change the setup of your VMs or you need " + "to use a different munin plugin for monitoring the network traffic " + "(e.g. 
'libvirt').") + print() for vm in vm_names: - print "%s_in.label %s" % (vm, vm) - print "%s_in.type COUNTER" % vm - print "%s_in.min 0" % vm - print "%s_in.draw LINE2" % vm - print "%s_out.negative %s_in" % (vm, vm) - print "%s_out.label %s" % (vm, vm) - print "%s_out.type COUNTER" % vm - print "%s_out.min 0" % vm - print "%s_out.draw LINE2" % vm + print("%s_in.label %s" % (vm, vm)) + print("%s_in.type COUNTER" % vm) + print("%s_in.min 0" % vm) + print("%s_in.graph no" % vm) + print("%s_out.negative %s_in" % (vm, vm)) + print("%s_out.label %s" % (vm, vm)) + print("%s_out.type COUNTER" % vm) + print("%s_out.min 0" % vm) + def clean_vm_name(vm_name): - ''' Replace all special chars + """ Replace all special chars + @param vm_name : a vm's name @return cleaned vm's name - ''' + """ # suffix part defined in conf - suffix = os.getenv('vmsuffix') + suffix = os.getenv("vmsuffix") if suffix: - vm_name = re.sub(suffix,'',vm_name) - - return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name) - -def fetch(vms): - ''' Fetch values for a list of pids - @param dictionnary {kvm_pid: cleaned vm name} - ''' - res = {} - for pid in vms: - tap = get_vm_mac(pid) + vm_name = re.sub(suffix, "", vm_name) + # proxmox uses kvm with -name parameter + parts = vm_name.split('\x00') + if (parts[0].endswith('kvm')): try: - f = open("/proc/net/dev", "r") - for line in f.readlines(): - if tap in line: - print "%s_in.value %s" % (vms[pid], re.sub(r"%s:"%tap, "", line.split()[0])) - print "%s_out.value %s" % (vms[pid], line.split()[8]) - break - except Exception as inst: - print inst - continue + return parts[parts.index('-name')+1] + except ValueError: + pass + return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name) + + +def fetch(vms): + """ Fetch values for a list of pids + + @param dictionary {kvm_pid: cleaned vm name} + """ + for pid, vm_data in vms.items(): + vm_interface_names = get_vm_network_interface_names(pid) + sum_incoming = 0 + sum_outgoing = 0 + interface_found = False + with open("/proc/net/dev", "r") as net_file: + for line in net_file.readlines(): + tokens = line.split() + current_interface_name = tokens[0].rstrip(":").strip() + if current_interface_name in vm_interface_names: + sum_incoming += int(tokens[1]) + sum_outgoing += int(tokens[9]) + interface_found = True + if not interface_found: + # we want to distinguish "no traffic" from "not found" + sum_incoming = "U" + sum_outgoing = "U" + print("%s_in.value %s" % (vm_data, sum_incoming)) + print("%s_out.value %s" % (vm_data, sum_outgoing)) + + +def get_vm_network_interface_names(pid): + """ return the MAC addresses configured for network interfacs of a PID """ + result = set() + for netdev_description in _get_kvm_process_arguments(pid, "netdev"): + match = KVM_INTERFACE_NAME_REGEX.search(netdev_description) + if match: + result.add(match.groups()[0]) + return result + def detect_kvm(): - ''' Check if kvm is installed - ''' - kvm = Popen("which kvm", shell=True, stdout=PIPE) + """ Check if kvm is installed """ + kvm = Popen(["which", "kvm"], stdout=PIPE) kvm.communicate() - return not bool(kvm.returncode) + return kvm.returncode == 0 + def find_vm_names(pids): - '''Find and clean vm names from pids - @return a dictionnary of {pids : cleaned vm name} - ''' + """Find and clean vm names from pids + + @return a dictionary of {pids : cleaned vm name} + """ result = {} for pid in pids: - cmdline = open("/proc/%s/cmdline" % pid, "r") - result[pid] = clean_vm_name(re.sub(r"^.*-name\x00([a-zA-Z0-9.-_-]*)\x00\-.*$",r"\1", cmdline.readline())) + name = None + name_arg_values = 
_get_kvm_process_arguments(pid, "name") + if name_arg_values: + name_arg_value = name_arg_values[0] + if "," in name_arg_value: + # the modern parameter format may look like this: + # guest=foo,debug-threads=on + for index, token in enumerate(name_arg_value.split(",")): + if (index == 0) and ("=" not in token): + # the first item may the plain name + name = value + elif "=" in token: + key, value = token.split("=", 1) + if key == "guest": + name = value + else: + # unknown format (no "mapping") + pass + else: + name = name_arg_value + if name is None: + print("Failed to parse VM name from commandline of process: {}" + .format(name_arg_values), file=sys.stderr) + else: + result[pid] = clean_vm_name(name) return result - -def get_vm_mac(pid): - '''Find and clean vm names from pids - @return the mac address for a specified pid - ''' - cmdline = open("/proc/%s/cmdline" % pid, "r") - line = cmdline.readline() - mac = re.sub(r"^.*ifname=(tap[^,]+),.*$",r"\1", line) - return mac + + +def _get_kvm_process_arguments(pid, arg_name): + """ parse all value with the given name from the process identified by PID + + The result is a list of tokens, that follow this argument name. The result + is empty in case of problems. + """ + # the "cmdline" (e.g. /proc/self/cmdline) is a null-separated token list + try: + with open("/proc/%s/cmdline" % pid, "r") as cmdline_file: + cmdline = cmdline_file.read() + except IOError: + # the process seems to have died meanwhile + return [] + is_value = False + result = [] + for arg_token in cmdline.split("\0"): + if is_value: + # the previous token was our argument name + result.append(arg_token) + is_value = False + elif arg_token == "-{}".format(arg_name): + # this is our argument name - we want to store the next value + is_value = True + else: + # any other irrelevant value + pass + return result + def list_pids(): - ''' Find the pid of kvm processes - @return a list of pids from running kvm - ''' - pid = Popen("pidof qemu-system-x86_64", shell=True, stdout=PIPE) - return pid.communicate()[0].split() + """ Find the pid of kvm processes -def find_vms_tap(): - ''' Check if kvm is installed @return a list of pids from running kvm - ''' - result = [] - tap = "" - mac = "" - kvm = Popen("ip a | grep -A 1 tap | awk '{print $2}' | grep -v '^$'", shell=True, stdout=PIPE) - res = kvm.communicate()[0].split('\n') - for line in res: - try: - if len(line) > 0: - if re.match(r"^tap.*", line): - tap = re.sub(r"(tap[^:]+):", r"\1", line) - else: - result.append(tap) - except Exception as inst: - continue + """ + pid = Popen(["pidof", "qemu-kvm", "qemu-system-x86_64", "kvm"], stdout=PIPE) + return pid.communicate()[0].decode().split() + - return result - if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] in ['autoconf', 'detect']: - if detect_kvm(): - print "yes" - else: - print "no" - elif sys.argv[1] == "config": - config(find_vm_names(list_pids()).values()) + action = sys.argv[1] if len(sys.argv) > 1 else None + if action == "autoconf": + if detect_kvm(): + print("yes") else: - fetch(find_vm_names(list_pids())) + print("no") + elif action == "config": + vm_data = find_vm_names(list_pids()) + config(vm_data.values()) else: - fetch(find_vm_names(list_pids())) - - + vm_data = find_vm_names(list_pids()) + fetch(vm_data) diff --git a/plugins/libvirt/libvirt b/plugins/libvirt/libvirt index 4b2bdcfd..753f50f7 100755 --- a/plugins/libvirt/libvirt +++ b/plugins/libvirt/libvirt @@ -87,7 +87,7 @@ $show{'network_drops'} = $ENV{show_network_drops} || 1; sub init() { my 
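find_vm_names() above walks the null-separated /proc/&lt;pid&gt;/cmdline, takes the token that follows "-name", and accepts both the plain form ("-name myvm") and the newer key=value list ("-name guest=myvm,debug-threads=on"). That parsing step, reduced to a few lines over an invented command line:

    # Invented KVM command line as it would appear in /proc/<pid>/cmdline (null-separated).
    cmdline = "/usr/bin/kvm\x00-id\x00101\x00-name\x00guest=testvm,debug-threads=on\x00-m\x002048"

    tokens = cmdline.split("\x00")
    raw = tokens[tokens.index("-name") + 1]    # the token right after "-name"

    name = raw
    if "," in raw:
        # modern format: comma-separated key=value pairs; the VM name sits under "guest="
        for part in raw.split(","):
            key, _, value = part.partition("=")
            if key == "guest":
                name = value

    print(name)    # -> testvm

The plugin's version additionally accepts a plain name as the first comma-separated item and reports PIDs whose name cannot be parsed; the sketch keeps only the common path.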
$type=undef; - + if ($ARGV[0] and $ARGV[0] eq "config"){ $type="config"; } @@ -142,7 +142,7 @@ sub init() { if(defined($hash{$name}{'info'}{'memory'})){ $hash{$name}{'info'}{'memory_bytes'} = 1024 * $hash{$name}{'info'}{'memory'}; } - + # Extract network usage if(defined($hash{$name}{'xml'}{'devices'}{'interface'}{'bridge'})){ my $vif_id=0; @@ -201,10 +201,10 @@ sub init() { #print "graph_order rd wr\n"; print "graph_title Disk utilization per domain in percent\n"; print "graph_vlabel %\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_args -l 0 --base 1000 --upper-limit 100\n"; #print "graph_width 550\n"; - + for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'block'})){ print $hash{$vm}{'label'} . ".label " . $hash{$vm}{'name'} . "\n"; @@ -214,14 +214,14 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk_utilization\n"; for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'block'}) && defined($prev_state_ref->{$vm}->{'devices'}->{'block'})){ my $utilization=0; - + for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ if(defined($prev_state_ref->{$vm}->{'devices'}->{'block'}->{$device})){ @@ -271,10 +271,10 @@ sub init() { #print "graph_order rd wr\n"; print "graph_title Disk utilization on " . $vm . " in percent\n"; print "graph_vlabel %\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_args -l 0 --base 1000 --upper-limit 100\n"; #print "graph_width 550\n"; - + if($devices>0){ if(defined($hash{$vm}{'devices'}{'block'})){ for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ @@ -286,25 +286,25 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ if($devices > 0){ print "multigraph libvirt_disk_utilization." . $hash{$vm}{'label'} . "\n"; if(defined($hash{$vm}{'devices'}{'block'}) && defined($prev_state_ref->{$vm}->{'devices'}->{'block'})){ - + for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ - + my $utilization=0; - + if(defined($prev_state_ref->{$vm}->{'devices'}->{'block'}->{$device})){ - + for my $source_device (keys %{$hash{$vm}{'devices'}{'block'}{$device}{'source'}}){ - + for my $slave_device (keys %{$hash{$vm}{'devices'}{'block'}{$device}{'source'}{$source_device}{'slaves'}}){ - + my $prev_ms_spent_doing_io=$prev_state_ref->{$vm}->{'devices'}->{'block'}->{$device}->{'source'}->{$source_device}->{'slaves'}->{$slave_device}->{'io_ticks'}; my $cur_ms_spent_doing_io=$hash{$vm}{'devices'}{'block'}{$device}{'source'}{$source_device}{'slaves'}{$slave_device}{'io_ticks'}; - + if($cur_ms_spent_doing_io > $prev_ms_spent_doing_io){ my $ticks=$cur_ms_spent_doing_io-$prev_ms_spent_doing_io; my $interval_ms=($time-$prev_time)*1000; @@ -341,9 +341,9 @@ sub init() { print "graph_title Disk latency per domain in seconds\n"; print "graph_args --base 1000\n"; print "graph_vlabel read (-) / write (+)\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'block'})){ @@ -358,7 +358,7 @@ sub init() { print $hash{$vm}{'label'} . "_rd.min 0\n"; print $hash{$vm}{'label'} . "_rd.draw LINE2\n"; print $hash{$vm}{'label'} . "_rd.graph no\n"; - + print $hash{$vm}{'label'} . "_wr.label " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_wr.info I/O latency in seconds on " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . 
"_wr.min 0\n"; @@ -368,7 +368,7 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk_latency\n"; for my $vm (sort keys %hash) { @@ -382,9 +382,9 @@ sub init() { my $cur_total_time_spent_reading=0; my $cur_total_ios_read=0; my $cur_total_ios_written=0; - + my $devices=0; - + for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ $devices++; @@ -454,18 +454,18 @@ sub init() { print "graph_title Disk latency per vbd on $vm in seconds\n"; print "graph_args --base 1000\n"; print "graph_vlabel read (-) / write (+)\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + if(defined($hash{$vm}{'devices'}{'block'})){ for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ - + print $hash{$vm}{'label'} . "_" . $device . "_rd.label " . $device . "_rd\n"; print $hash{$vm}{'label'} . "_" . $device . "_rd.info I/O latency in seconds on " . $hash{$vm}{'name'} . ":" . $device . "\n"; print $hash{$vm}{'label'} . "_" . $device . "_rd.min 0\n"; print $hash{$vm}{'label'} . "_" . $device . "_rd.draw LINE2\n"; print $hash{$vm}{'label'} . "_" . $device . "_rd.graph no\n"; - + print $hash{$vm}{'label'} . "_" . $device . "_wr.label " . $device . "\n"; print $hash{$vm}{'label'} . "_" . $device . "_wr.info I/O latency in seconds on " . $hash{$vm}{'name'} . ":" . $device . "\n"; print $hash{$vm}{'label'} . "_" . $device . "_wr.min 0\n"; @@ -479,11 +479,11 @@ sub init() { print "multigraph libvirt_disk_latency.$hash{$vm}{'label'}\n"; if(defined($hash{$vm}{'devices'}{'block'}) && defined($prev_state_ref->{$vm}->{'devices'}->{'block'})){ for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ - + if(defined($prev_state_ref->{$vm}->{'devices'}->{'block'}->{$device})){ - + for my $source_device (keys %{$hash{$vm}{'devices'}{'block'}{$device}{'source'}}){ - + my $prev_total_time_spent_writing=0; my $prev_total_time_spent_reading=0; my $prev_total_ios_read=0; @@ -492,7 +492,7 @@ sub init() { my $cur_total_time_spent_reading=0; my $cur_total_ios_read=0; my $cur_total_ios_written=0; - + for my $slave_device (keys %{$hash{$vm}{'devices'}{'block'}{$device}{'source'}{$source_device}{'slaves'}}){ my $prev_time_spent_writing=$prev_state_ref->{$vm}->{'devices'}->{'block'}->{$device}->{'source'}->{$source_device}->{'slaves'}->{$slave_device}->{'write_ticks'}; @@ -518,18 +518,18 @@ sub init() { my $read_latency=0; my $write_latency=0; - + if($prev_total_time_spent_reading > 0 && $prev_total_ios_read > 0 && ($cur_total_ios_read-$prev_total_ios_read) > 0){ $read_latency=(($cur_total_time_spent_reading-$prev_total_time_spent_reading)/($cur_total_ios_read-$prev_total_ios_read))/1000; } - + if($prev_total_time_spent_writing > 0 && $prev_total_ios_written > 0 && ($cur_total_ios_written-$prev_total_ios_written) > 0){ $write_latency=(($cur_total_time_spent_writing-$prev_total_time_spent_writing)/($cur_total_ios_written-$prev_total_ios_written))/1000; } - + print $hash{$vm}{'label'} . "_" . $device . "_rd.value " . $read_latency . "\n"; print $hash{$vm}{'label'} . "_" . $device . "_wr.value " . $write_latency . 
"\n"; - + } } } @@ -552,9 +552,9 @@ sub init() { print "graph_title Disk traffic per domain in bytes\n"; print "graph_args --base 1000\n"; print "graph_vlabel bytes read (-) / written (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vm (sort keys %hash) { my $devices=0; if(defined($hash{$vm}{'devices'}{'block'})){ @@ -565,14 +565,14 @@ sub init() { if($devices > 0){ if(defined($hash{$vm}{'devices'}{'block'})){ - + print $hash{$vm}{'label'} . "_rd_bytes.label " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_rd_bytes.type COUNTER\n"; print $hash{$vm}{'label'} . "_rd_bytes.info The number of bytes read by " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_rd_bytes.min 0\n"; print $hash{$vm}{'label'} . "_rd_bytes.draw LINE2\n"; print $hash{$vm}{'label'} . "_rd_bytes.graph no\n"; - + print $hash{$vm}{'label'} . "_wr_bytes.label " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_wr_bytes.type COUNTER\n"; print $hash{$vm}{'label'} . "_wr_bytes.info The number of bytes written by " . $hash{$vm}{'name'} . "\n"; @@ -583,7 +583,7 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk\n"; for my $vm (sort keys %hash) { @@ -604,7 +604,7 @@ sub init() { # # Disk used, second level # - + for my $vm (sort keys %hash) { my $devices=0; if(defined($hash{$vm}{'devices'}{'block'})){ @@ -621,16 +621,16 @@ sub init() { print "graph_title Disk traffic for " . $hash{$vm}{'name'} . "\n"; print "graph_args --base 1000\n"; print "graph_vlabel bytes read (-) / written (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ print $device . "_rd_bytes.label " . $device . "_rd\n"; print $device . "_rd_bytes.type COUNTER\n"; print $device . "_rd_bytes.info The number of bytes read by " . $hash{$vm}{'name'} . "\n"; print $device . "_rd_bytes.min 0\n"; print $device . "_rd_bytes.graph no\n"; - + print $device . "_wr_bytes.label " . $device . "\n"; print $device . "_wr_bytes.type COUNTER\n"; print $device . "_wr_bytes.info The number of bytes written by " . $hash{$vm}{'name'} . "\n"; @@ -639,7 +639,7 @@ sub init() { print $device . "_wr_bytes.negative " . $device . "_rd_bytes\n"; } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk.bytes_" . $hash{$vm}{'label'} . "\n"; if(defined($hash{$vm}{'devices'}{'block'})){ @@ -671,8 +671,8 @@ sub init() { print "multigraph libvirt_disk_errs\n"; print "graph_title Disk errors per domain\n"; print "graph_args --base 1000\n"; - print "graph_category Virtualization\n"; - + print "graph_category virtualization\n"; + for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'block'})){ my $devices=0; @@ -691,7 +691,7 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk_errs\n"; for my $vm (sort keys %hash) { @@ -709,7 +709,7 @@ sub init() { # # Disk errors, second level # - + for my $vm (sort keys %hash) { my $devices=0; if(defined($hash{$vm}{'devices'}{'block'})){ @@ -723,18 +723,18 @@ sub init() { print "multigraph libvirt_disk_errs." . $hash{$vm}{'label'} . "\n"; print "graph_title Disk errors for " . $hash{$vm}{'name'} . 
"\n"; print "graph_args --base 1000\n"; - print "graph_category Virtualization\n"; - + print "graph_category virtualization\n"; + for my $device (keys %{$hash{$vm}{'devices'}{'block'}}){ - + print $device . "_errs.label " . $device . "\n"; print $device . "_errs.type COUNTER\n"; - print $device . "_errs.info The number of errors by " . $hash{$vm}{'name'} . " on defice " . $device . "\n"; + print $device . "_errs.info The number of errors by " . $hash{$vm}{'name'} . " on device " . $device . "\n"; print $device . "_errs.min 0\n"; print $device . "_errs.draw LINE2\n"; } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_disk_errs." . $hash{$vm}{'label'} . "\n"; if(defined($hash{$vm}{'devices'}{'block'})){ @@ -764,12 +764,12 @@ sub init() { print "graph_title Network traffic per domain in bytes\n"; print "graph_args --base 1000\n"; print "graph_vlabel Bytes in (-) / out (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'network'})){ - + print $hash{$vm}{'label'} . "_rx_bytes.label " . $hash{$vm}{'name'} . "_rx\n"; print $hash{$vm}{'label'} . "_rx_bytes.type DERIVE\n"; print $hash{$vm}{'label'} . "_rx_bytes.info The number of bytes read by " . $hash{$vm}{'name'} . " in total.\n"; @@ -777,7 +777,7 @@ sub init() { print $hash{$vm}{'label'} . "_rx_bytes.draw LINE2\n"; print $hash{$vm}{'label'} . "_rx_bytes.graph no\n"; #print $hash{$vm}{'label'} . "_rx_bytes.cdef " . $hash{$vm}{'label'} . "_rx_bytes,8,*\n"; - + print $hash{$vm}{'label'} . "_tx_bytes.label " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_tx_bytes.type DERIVE\n"; print $hash{$vm}{'label'} . "_tx_bytes.info The number of bytes written by " . $hash{$vm}{'name'} . " in total.\n"; @@ -788,7 +788,7 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_network\n"; for my $vm (sort keys %hash) { @@ -817,12 +817,12 @@ sub init() { print "graph_title Network traffic for " . $vm . "\n"; print "graph_args --base 1000\n"; print "graph_vlabel Bits in (-) / out (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vif (keys %{$hash{$vm}{'devices'}{'network'}}){ my $vif_id=$hash{$vm}{'devices'}{'network'}{$vif}{'vif_id'}; - + print "rx_bytes_" . $vif_id . ".label " . $vif . "_rx\n"; print "rx_bytes_" . $vif_id . ".type DERIVE\n"; print "rx_bytes_" . $vif_id . ".info The number of bytes read by " . $hash{$vm}{'name'} . "\n"; @@ -870,19 +870,19 @@ sub init() { print "graph_title Network packets dropped per domain\n"; print "graph_args --base 1000\n"; print "graph_vlabel Count in (-) / out (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vm (sort keys %hash) { if(defined($hash{$vm}{'devices'}{'network'})){ - + print $hash{$vm}{'label'} . "_rx_drop.label " . $hash{$vm}{'name'} . "_rx\n"; print $hash{$vm}{'label'} . "_rx_drop.type DERIVE\n"; print $hash{$vm}{'label'} . "_rx_drop.info The number of packets dropped at reception by " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . "_rx_drop.min 0\n"; print $hash{$vm}{'label'} . "_rx_drop.draw LINE2\n"; print $hash{$vm}{'label'} . "_rx_drop.graph no\n"; - + print $hash{$vm}{'label'} . "_tx_drop.label " . $hash{$vm}{'name'} . "\n"; print $hash{$vm}{'label'} . 
"_tx_drop.type DERIVE\n"; print $hash{$vm}{'label'} . "_tx_drop.info The number of packets dropped at transmission by " . $hash{$vm}{'name'} . "\n"; @@ -892,7 +892,7 @@ sub init() { } } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_network_drop\n"; for my $vm (sort keys %hash) { @@ -923,12 +923,12 @@ sub init() { print "graph_title Network packeds dropped by " . $vm . "\n"; print "graph_args --base 1000\n"; print "graph_vlabel Count in (-) / out (+) per \${graph_period}\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + for my $vif (keys %{$hash{$vm}{'devices'}{'network'}}){ my $vif_id=$hash{$vm}{'devices'}{'network'}{$vif}{'vif_id'}; - + print "rx_drop_" . $vif_id . ".label " . $vif . "_rx\n"; print "rx_drop_" . $vif_id . ".type DERIVE\n"; print "rx_drop_" . $vif_id . ".info The number of packets dropped by " . $hash{$vm}{'name'} . ", nic " . $vif_id . "\n"; @@ -960,7 +960,7 @@ sub init() { # # CPU used # - + if($show{'cpu_used'} == 1){ # @@ -971,9 +971,9 @@ sub init() { print "multigraph libvirt_cpu\n"; print "graph_title Cpu time used per domain in percent\n"; print "graph_args --base 1000 -r --lower-limit 0 --upper-limit 100\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + my $draw="AREA"; for my $vm (sort keys %hash) { print $hash{$vm}{'label'} . "_time.label " . $hash{$vm}{'name'} . "\n"; @@ -984,7 +984,7 @@ sub init() { $draw="STACK" if $draw eq "AREA"; } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_cpu\n"; for my $vm (sort keys %hash) { @@ -996,15 +996,15 @@ sub init() { # # CPU used, second level (pr virtual machine) # - + if($type eq "config"){ for my $vm (sort keys %hash) { print "multigraph libvirt_cpu.vm_" . $hash{$vm}{'label'} . "\n"; print "graph_title Cpu time used by " . $hash{$vm}{'name'} . " in percent\n"; print "graph_args --base 1000\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + print "time.label " . $hash{$vm}{'name'} . " (" . $hash{$vm}{'type'} . ")\n"; print "time.type DERIVE\n"; print "time.info The cpu time used by " . $hash{$vm}{'name'} . " in percent of the total available cpu time on the physical node. This domain has access to " . $hash{$vm}{'info'}{'nrVirtCpu'} . " VCPU(s) now, and $hash{$vm}{'maxvcpus'} at maximum. The scheduler for this domain is " . $hash{$vm}{'scheduler'} . ".\n"; @@ -1012,7 +1012,7 @@ sub init() { print "time.draw AREA\n"; print "\n"; } - + } elsif($type eq "fetch"){ for my $vm (sort keys %hash) { print "multigraph libvirt_cpu.vm_" . $hash{$vm}{'label'} . "\n"; @@ -1036,9 +1036,9 @@ sub init() { print "multigraph libvirt_mem\n"; print "graph_title Memory allocated per domain\n"; print "graph_args --base 1000\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + my $draw="AREA"; for my $vm (sort keys %hash) { print $hash{$vm}{'label'} . "_alloc.label " . $hash{$vm}{'name'} . "\n"; @@ -1049,7 +1049,7 @@ sub init() { $draw="STACK" if $draw eq "AREA"; } print "\n"; - + } elsif($type eq "fetch"){ print "multigraph libvirt_mem\n"; for my $vm (sort keys %hash) { @@ -1061,15 +1061,15 @@ sub init() { # # Memory allocated, second level (pr virtual machine) # - + if($type eq "config"){ for my $vm (sort keys %hash) { print "multigraph libvirt_mem.vm_" . $hash{$vm}{'label'} . 
"\n"; print "graph_title Memory allocated to " . $hash{$vm}{'name'} . "\n"; print "graph_args --base 1000\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; #print "graph_width 550\n"; - + print "mem.label " . $hash{$vm}{'name'} . " (" . $hash{$vm}{'type'} . ")\n"; print "mem.type GAUGE\n"; print "mem.info Amount of memory allocated to " . $hash{$vm}{'name'} . ". The maximum amount of memory for this domain is " . $hash{$vm}{'maxmem'}/1024 . " MB.\n"; @@ -1077,7 +1077,7 @@ sub init() { print "mem.draw AREA\n"; print "\n"; } - + } elsif($type eq "fetch"){ for my $vm (sort keys %hash) { print "multigraph libvirt_mem.vm_" . $hash{$vm}{'label'} . "\n"; @@ -1156,7 +1156,7 @@ sub parse_xml { my $teller = 0; my $fortsette = 1; - + while($fortsette){ if( $a[$i]{'devices'}[0]{'disk'}[$teller] ){ my $type=$a[$i]{'devices'}[0]{'disk'}[$teller]{'type'}; @@ -1173,7 +1173,7 @@ sub parse_xml { else{ $fortsette = 0; } - } + } } # Hack to extract network information and put it into the hash @@ -1182,7 +1182,7 @@ sub parse_xml { my $teller = 0; my $fortsette = 1; - + while($fortsette){ if( $a[$i]{'devices'}[0]{'interface'}[$teller] ){ my $type=$a[$i]{'devices'}[0]{'interface'}[$teller]{'type'}; @@ -1198,7 +1198,7 @@ sub parse_xml { else{ $fortsette = 0; } - } + } } } @@ -1213,11 +1213,11 @@ sub read_diskstats{ # Verify that $dev is a block device. if(-b $dev){ # Read minor and major number - my $rdev = stat($dev)->rdev; - $res{'major'} = $rdev >> 8; + my $rdev = stat($dev)->rdev; + $res{'major'} = $rdev >> 8; $res{'minor'} = $rdev & 0xff; - # If major numer is 253, then proceed as dm-device + # If major number is 253, then proceed as dm-device if($res{'major'} == 253){ # check that the directory /sys/block/dm-$minor/ exists with a /slaves/ sub directory @@ -1238,7 +1238,7 @@ sub read_diskstats{ while (my $line = ) { # 1 2 3 4 5 6 7 8 9 10 11 if($line =~ m/(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/){ - + # Name units description # ---- ----- ----------- # read I/Os requests number of read I/Os processed diff --git a/plugins/libvirt/munin-libvirtpy b/plugins/libvirt/munin-libvirtpy index 34631725..b37e7a19 100755 --- a/plugins/libvirt/munin-libvirtpy +++ b/plugins/libvirt/munin-libvirtpy @@ -1,6 +1,6 @@ #!/usr/bin/python -# Revision 1.0 2008/05/16 - Steven Wagner +# Revision 1.0 2008/05/16 - Steven Wagner # First functional release. Works for me. 
# # Revision 0.5 2008/05/01 - Julien Rottenberg @@ -37,7 +37,7 @@ if len(sys.argv) == 2: print "graph_title KVM Domain CPU Utilization" print "graph_vlabel CPU use in seconds" print "graph_args --base 1000" - print "graph_category Virtualization" + print "graph_category virtualization" for id in ids: dom = conn.lookupByID(id) @@ -45,7 +45,7 @@ if len(sys.argv) == 2: print "%s.type COUNTER" %(nodeName) print "%s.label %s" %(nodeName, nodeName) sys.exit(1) - + for id in ids: dom = conn.lookupByID(id) state, maxMem, memory, numVirtCpu, cpuTime = dom.info() @@ -55,5 +55,3 @@ for id in ids: # print """Domain: %s, %s state (%s), %d CPUs, %d seconds, %d milliseconds, mem/max (%d/%d) """ \ # % (nodeName, ostype, state, numVirtCpu, cpuTime/float(1000000000), cpuTime/float(1000000), memory, maxMem ) print "%s.value %d" % (nodeName, cpuTime/float(1000000)) - - diff --git a/plugins/licensing/flexlm_ b/plugins/licensing/flexlm_ index 1ad006f4..ff7c3145 100755 --- a/plugins/licensing/flexlm_ +++ b/plugins/licensing/flexlm_ @@ -42,11 +42,11 @@ # - LMSTAT: The path to the lmstat binary # - LMLICFILE: The path to the FlexLM License File # - LMLOGARITHMIC If set then graph use a logarithmic scale -# +# # $Log$ # Revision 1.00 20090807 nassarmu # Initial public release. -# +# # Revision 1.10 20120625 nassarmu@msi.umn.edu # incorporate the rewrite by TSUCHIYA Masatoshi # diff --git a/plugins/lighttpd/lighttpd_ b/plugins/lighttpd/lighttpd_ index 838bfa13..98863983 100755 --- a/plugins/lighttpd/lighttpd_ +++ b/plugins/lighttpd/lighttpd_ @@ -1,84 +1,111 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # vim: set fileencoding=utf-8 -# -# Munin plugin to monitor lighttpd web-server. -# -# Copyright Igor Borodikhin -# -# License : GPLv3 -# -# Configuration parameters: -# env.status_url - url of lighty's server-status (optional, default is http://127.0.0.1/server-status) -# env.username - username to provide if status_url requires authentication (optional, default - no authentication) -# env.password - password to provide if status_url requires authentication (optional, default - no authentication) -# env.auth_type - the authentication mechanism to use -- either 'basic' (default) or 'digest'. -# -# Note: If HTTP authentication is required you should specify both username and password. -# -# ## Installation -# Copy file to directory /usr/share/munin/plugins/ -# Because this plugin has suggest capability the last step is to run -# # munin-node-configure --suggest --shell | sh -x -# -#%# family=contrib -#%# capabilities=autoconf suggest +""" + +=head1 NAME + +Munin plugin to monitor lighttpd web-server. + + +=head1 CONFIGURATION + +Configuration parameters: + + [lighttpd_] + env.status_url - url of lighty's server-status + (optional, default is http://127.0.0.1/server-status) + env.username - username to provide if status_url requires authentication + (optional, default - no authentication) + env.password - password to provide if status_url requires authentication + (optional, default - no authentication) + env.auth_type - the authentication mechanism to use -- either 'basic' (default) or 'digest'. + +Note: If HTTP authentication is required you should specify both username and password. 
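For reference, a minimal plugin configuration for this plugin could look like the one below. The status URL shown is the documented default; the username and password are placeholders for illustration only:

    [lighttpd_*]
    env.status_url http://127.0.0.1/server-status
    env.username munin
    env.password secret
    env.auth_type basic

The wildcard part of the link name selects the graph; the script defines the graph types accesses, kbytes, uptime and status, so a symlink named lighttpd_accesses reports the total access counter.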
+ + +=head1 INSTALLATION + +Copy file to directory /usr/share/munin/plugins/ +Because this plugin has "suggest" capability the last step is to run + + munin-node-configure --suggest --shell | sh -x + + +=head1 AUTHOR + +Copyright Igor Borodikhin + + +=head1 LICENSE + +GPLv3 + + +=head1 MAGIC MARKERS + #%# family=contrib + #%# capabilities=autoconf suggest +""" + +import os +import sys +import urllib2 -import os, sys, urllib2 program = sys.argv[0] -graph_type = program[program.rfind("_")+1:] +graph_type = program[program.rfind("_") + 1:] graph_types = { - "accesses" : [ + "accesses": [ { - "title" : "Total accesses", - "type" : "COUNTER", - "args" : "--base 1000 -l 0", - "fields" : ["accesses"] + "title": "Total accesses", + "type": "COUNTER", + "args": "--base 1000 -l 0", + "fields": ["accesses"] } ], - "kbytes" : [ + "kbytes": [ { - "title" : "Total kBytes", - "type" : "COUNTER", - "args" : "--base 1024 -l 0", - "fields" : ["kbytes"] + "title": "Total kBytes", + "type": "COUNTER", + "args": "--base 1024 -l 0", + "fields": ["kbytes"] } ], - "uptime" : [ + "uptime": [ { - "title" : "Uptime", - "type" : "GAUGE", - "args" : "--base 1000 -l 0", - "fields" : ["uptime"] + "title": "Uptime", + "type": "GAUGE", + "args": "--base 1000 -l 0", + "fields": ["uptime"] } ], - "status" : [ + "status": [ { - "title" : "Status", - "type" : "GAUGE", - "args" : "--base 1000 -l 0", - "fields" : ["busy", "idle"] + "title": "Status", + "type": "GAUGE", + "args": "--base 1000 -l 0", + "fields": ["busy", "idle"] } ] } + if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - print "yes" + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": if graph_type not in graph_types.keys(): - raise Exception("Unknown graph type '%s'"%graph_type) + raise Exception("Unknown graph type '%s'" % graph_type) params = graph_types[graph_type] for item in params: - print "graph_title %s" % item["title"] - print "graph_category webserver" + print("graph_title %s" % item["title"]) + print("graph_category webserver") for field in item["fields"]: - print "%s.label %s" % (field, field) - print "%s.type %s" % (field, item["type"]) - print "graph_args %s" % item["args"] + print("%s.label %s" % (field, field)) + print("%s.type %s" % (field, item["type"])) + print("graph_args %s" % item["args"]) elif len(sys.argv) == 2 and sys.argv[1] == "suggest": for item in graph_types.keys(): - print item + print(item) else: status_url = os.environ.get('status_url', 'http://127.0.0.1/server-status') @@ -96,17 +123,17 @@ else: data = {} for line in info.split("\n"): try: - (title, value) = line.split(": ") - data[title] = value - except Exception: - pass + (title, value) = line.split(": ") + data[title] = value + except ValueError: + pass if graph_type == "accesses": - print "accesses.value %s" % data["Total Accesses"] + print("accesses.value %s" % data["Total Accesses"]) elif graph_type == "kbytes": - print "kbytes.value %s" % data["Total kBytes"] + print("kbytes.value %s" % data["Total kBytes"]) elif graph_type == "uptime": - print "uptime.value %s" % str(float(data["Uptime"])/86400) + print("uptime.value %s" % str(float(data["Uptime"]) / 86400)) elif graph_type == "status": - print "busy.value %s" % data["BusyServers"] - print "idle.value %s" % data["IdleServers"] + print("busy.value %s" % data["BusyServers"]) + print("idle.value %s" % data["IdleServers"]) diff --git a/plugins/load/load_extended b/plugins/load/load_extended index 3e7973ce..96af601c 100755 --- a/plugins/load/load_extended +++ b/plugins/load/load_extended @@ -1,5 +1,5 @@ 
#!/bin/sh -# +# # Plugin to monitor the load average on a system. # # Usage: Link or copy into /etc/munin/node.d/ @@ -40,7 +40,7 @@ # If run with the "autoconf"-parameter, give our opinion on whether we -# should be run on this system or not. This is optinal, and only used by +# should be run on this system or not. This is optional, and only used by # munin-config. In the case of this plugin, we should most probably # always be included. @@ -50,8 +50,8 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. - +# graphs should look. + if [ "$1" = "config" ]; then # The host name this plugin is for. (Can be overridden to have # one machine answer for several) @@ -69,7 +69,7 @@ if [ "$1" = "config" ]; then # Graph category. Defaults to 'other' echo 'graph_category system' # The fields. "label" is used in the legend. "label" is the only - # required subfield. + # required subfield. echo 'load1.label load 1 min avg' echo 'load1.draw AREA' echo 'load5.label load 5 min avg' @@ -133,5 +133,5 @@ cut -f3 -d' ' < /proc/loadavg # Except for the Nagios-warnings (which most people don't have any need # for) and things used by installation scripts and munin-config (which # you don't need if you don't plan on submitting your plugin to the -# pacakge), and the scaling (no milliload numbers) the two versions will +# package), and the scaling (no milliload numbers) the two versions will # work identically. diff --git a/plugins/logins/logins b/plugins/logins/logins index db7278b4..bde4b43d 100755 --- a/plugins/logins/logins +++ b/plugins/logins/logins @@ -17,10 +17,10 @@ EOF # can go into an include file labels=$((${#graph[*]}/(graph+1))) -for ((i=0; i would be +F. + +One good way to get these names is to run C +after you've configured the required variables and then just copy/paste +the names from the output. + +=head1 AUTHOR + +Kael Shipman + +=head1 LICENSE + +Copyright 2018 Kael Shipman + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut + + +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" + +regex=${regex:-} +logfiles=${logfiles:-} +LOGFILES="$(IFS= ; for f in $logfiles; do echo "$f"; done)" +title="${title:-Log Matches}" + + +function config() { + echo "graph_title ${title}" + echo "graph_args --base 1000 -l 0" + echo "graph_vlabel ${title}" + echo "graph_category other" + echo "graph_info Lists number of times the given regex is matched in the given log files per period" + + local var_prefix var logfile lbl + while read -u 3 -r logfile; do + var_prefix="$(echo "$logfile" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + var="${var_prefix}_label" + lbl="${!var:-$logfile}" + echo "$var_prefix.label $lbl" + print_warning "$var_prefix" + print_critical "$var_prefix" + echo "$var_prefix.info Lines that match '${regex}' in log file '$logfile'" + done 3< <(echo "$LOGFILES") +} + + +function fetch() { + # Load state + touch "$MUNIN_STATEFILE" + local nextstate=() + local curstate + curstate="$(cat "$MUNIN_STATEFILE")" + + local var_prefix logfile prvlines curlines matches + while read -u 3 -r logfile; do + # Convert current logfile path to variable prefix + var_prefix="$(echo "$logfile" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + + # Get running number of lines to determine whether or not the file may have been rotated + prvlines="$(echo "$curstate" | grep "^${var_prefix}_lines=" | cut -f 2 -d "=")" + if [ -z "$prvlines" ]; then + prvlines=0 + fi + + # Get the current number of lines in the file + curlines="$(wc -l < "$logfile")" + if [ -z "$curlines" ]; then + curlines=0 + fi + + # If the current line count is less than the previous line count, we've probably rotated. + # Reset to 0. + if [ "$curlines" -lt "$prvlines" ]; then + prvlines=0 + else + prvlines=$((prvlines + 1)) + fi + + # Get current number of incidents + matches="$(tail -n +"$prvlines" "$logfile" | grep -Ec "${regex}" || true)" + + # Echo the value + echo "$var_prefix.value $matches" + + # Push onto next state + nextstate+=("${var_prefix}_lines=$curlines") + done 3< <(echo "$LOGFILES") + + # Write state to munin statefile + (IFS=$'\n'; echo "${nextstate[*]}" > "$MUNIN_STATEFILE") + + return 0 +} + + +case "$1" in + config) config ;; + *) fetch ;; +esac diff --git a/plugins/logs/service_events b/plugins/logs/service_events new file mode 100755 index 00000000..d16554f1 --- /dev/null +++ b/plugins/logs/service_events @@ -0,0 +1,415 @@ +#!/bin/bash + +set -e + +: << =cut + +=head1 DESCRIPTION + +service_events - Tracks the number of significant event occurrences per service + +This plugin is a riff on the loggrep family (C and my own C). +However, rather than focusing on single log files, it focuses on providing +insight into all "significant events" happening for a given service, which +may be found across several log files. + +The idea is that any given service may produce events in various areas of +operation. For example, while a typical web app might log runtime errors +to it's app.log file, a filesystem change may prevent the whole app from +even being bootstrapped, and this crucial error may be logged in an apache +log or in syslog. + +This plugin attempts to give visibility into all such "important events" +that may affect the proper functioning of a given service. It attempts to +answer the question, "Is my service running normally?". 
+ +Unfortunately, it won't help you trace down exactly where the events are +coming from if you happen to be watching a number of different logs, but +it will at least let you know that something is wrong and that action +should be taken. To try to help with this, the plugin uses the extinfo +field to list which logs currently have important events in them. + +The plugin can be included multiple times to create graphs for various +differing kinds of services. For example, you may have both webservices +and system cleanup services, and you want to keep an eye on them in +different ways. + +You can accomplish this by linking the plugin twice with different names +and providing different configuration for each instance. In general, you +should think of a single instance of this plugin as representing a single +class of services. + + +=head1 CONFIGURATION + +Configuration for this plugin is admittedly complicated. What we're doing +here is defining groups of logfiles that we're searching for various +kinds of events. It is assumed that the _way_ we search for events in the +logfiles is related to the type of logfile; thus, we associate match +criteria with logfile groups. Then, we define services that we want to +track, then mappings of logfile paths to those services. + +(Note that most instances will probably work best when run as root, since +log files are usually (or at least should be) controlled with strict +permissions.) + +Available config options include the following: + + Plugin-specific: + + env._logfiles - (reqd) Shell glob pattern defining logfiles of + type + env._regex - (reqd) egrep pattern for finding events in logs + of type + env.services - (optl) Space-separated list of service names + env.services_autoconf - (optl) Shell glob pattern that expands to paths + whose final member is the name of a service + env._logbinding - (optl) egrep pattern for binding to + a given set of logfiles (based on path) + env._warning - (optl) service-specific warning level override + env._critical - (optl) service-specific critical level override + + Munin-standard: + + env.title - Graph title + env.vlabel - Custom label for the vertical axis + env.warning - Default warning level + env.critical - Default critical level + +For plugin-specific options, the following rules apply: + +* C<< >> is any arbitrary string. It just has to match between + C<< _logfiles >> and C<< _regex >>. Common values are "apache", + "nginx", "apt", "syslog", etc. +* is a string derived by passing the service name through a filter + that removes non-alphabet characters from the beginning and replaces all non- + alphanumeric characters with underscore (C<_>). +* logfiles are bound to services by matching C<< _logbinding >> on the + full logfile path. For example, specifying C would + bind both F and F + to the defined C service. + + +=head2 SERVICE AUTOCONF + +Because services are often dynamic and you don't want to have to manually update +config every time you deploy a new service, you have the option of defining a +glob pattern that resolves to a collection of paths whose endpoints are service +names. Because of the way services are deployed in real life, it's fairly common +that paths will exist on your system that can accommodate this. Most often it +will be something like /srv/*/*, which would match all children in /srv/www/ and +/srv/local/. + +If you choose not to use the autoconf feature, you MUST specify services as a +space-separated list of service names in the C variable. 
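The autoconf expansion is nothing more than a shell glob whose last path component is taken as the service name, as the example configs below also show. A rough sketch of what the plugin does internally, assuming a hypothetical /srv layout:

    # With /srv/www/www.example.com and /srv/local/cleanup present, this
    # yields the services "www.example.com" and "cleanup".
    for s in /srv/*/*; do
        basename "$s"
    done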
+ + +=head2 EXAMPLE CONFIGS + +This example uses services autoconf: + + [service_events] + user root + env.services_autoconf /srv/*/* + env.cfxsvc_logfiles /srv/*/*/logs/app.log + env.cfxsvc_regex error|alert|crit|emerg + env.phpfpm_logfiles /srv/*/*/logs/php-fpm*.log + env.phpfpm_regex Fatal error + env.apache_logfiles /srv/*/*/logs/errors.log + env.apache_regex error|alert|crit|emerg + env.warning 1 + env.critical 5 + env.my_special_service_warning 100 + env.my_special_service_critical 300 + +This example DOES NOT use services autoconf: + + [service_events] + user root + env.services auth.example.com admin.example.com www.example.com + env.auth_example_com_logbinding my-custom-binding[0-9]+ + env.cfxsvc_logfiles /srv/*/*/logs/app.log + env.cfxsvc_regex error|alert|crit|emerg + env.phpfpm_logfiles /srv/*/*/logs/php-fpm*.log + env.phpfpm_regex Fatal error + env.apache_logfiles /srv/*/*/logs/errors.log + env.apache_regex error|alert|crit|emerg + env.warning 1 + env.critical 5 + env.auth_example_com_warning 100 + env.auth_example_com_critical 300 + env.www_example_com_warning 50 + env.www_example_com_critical 100 + +This graph will ONLY ever show values for the three listed services, even +if other services are installed whose logfiles match the logfiles search. + +Also notice that in this example, we've only listed a log binding for the +auth service. The plugin will use the service name by default for any +services that don't specify a log binding, so in this case, auth has a +custom log binding, while all other services have log bindings equal to +their names. + + +=head1 AUTHOR + +Kael Shipman + + +=head1 LICENSE + +MIT LICENSE + +Copyright 2018 Kael Shipman + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut + + +services_autoconf=${services_autoconf:-} + +# Get list of all currently set env variables +vars=$(printenv | cut -f 1 -d "=") + +# Certain variables MUST be set; check that they are (using bitmask) +setvars=0 +reqvars=(_logfiles _regex) +while read -u 3 -r v; do + n=0 + while [ "$n" -lt "${#reqvars[@]}" ]; do + if echo "$v" | grep -Eq "${reqvars[$n]}$"; then + setvars=$((setvars | (2 ** n) )) + fi + n=$((n+1)) + done +done 3< <(echo "$vars") + + +# Sum all required variables +n=0 +allvars=0 +while [ "$n" -lt "${#reqvars[@]}" ]; do + allvars=$(( allvars + 2 ** n )) + n=$((n+1)) +done + +# And scream if something's not set +if ! 
[ "$setvars" -eq "$allvars" ]; then + >&2 echo "E: Missing some required variables:" + >&2 echo + n=0 + i=1 + while [ "$n" -lt "${#reqvars[@]}" ]; do + if [ $(( setvars & i )) -eq 0 ]; then + >&2 echo " *${reqvars[$n]}" + fi + i=$((i<<1)) + n=$((n+1)) + done + >&2 echo + >&2 echo "Please read the docs." + exit 1 +fi + +# Check for more difficult variables +if [ -z "$services" ] && [ -z "$services_autoconf" ]; then + >&2 echo "E: You must pass either \$services or \$services_autoconf" + exit 1 +fi +if [ -z "$services_autoconf" ] && ! echo "$vars" | grep -q "_logbinding"; then + >&2 echo "E: You must pass either \$*_logbinding (for each service) or \$services_autoconf" + exit 1 +fi + + +# Now go find all log files +LOGFILES= +declare -a LOGFILEMAP +while read -u 3 -r v; do + if echo "$v" | grep -Eq "_logfiles$"; then + # Get the name associated with these logfiles + logfiletype="${v%_logfiles}" + # This serves to expand globs while preserving spaces (and also appends the necessary newline) + while IFS= read -u 4 -r -d$'\n' line; do + LOGFILEMAP+=($logfiletype) + LOGFILES="${LOGFILES}$line"$'\n' + done 4< <(IFS= ; for f in ${!v}; do echo "$f"; done) + fi +done 3< <(echo "$vars") + + +# Set some defaults and other values +title="${title:-Important Events per Service}" +vlabel="${vlabel:-events}" + +# If services_autoconf is passed, it is assumed to be a shell glob, the leaves of which are the services +# This also autobinds the service, if not already bound +if [ -n "$services_autoconf" ]; then + declare -a services + IFS= + for s in $services_autoconf; do + s="$(basename "$s")" + services+=("$s") + done + unset IFS +else + services=($services) +fi + + +# Import munin functions +. "$MUNIN_LIBDIR/plugins/plugin.sh" + + +# Now get to the real function definitions + +function config() { + echo "graph_title ${title}" + echo "graph_args --base 1000 -l 0" + echo "graph_vlabel ${vlabel}" + echo "graph_category other" + echo "graph_info Lists number of matching lines found in various logfiles associated with each service. Extinfo displays currently affected logs." + + local var_prefix + while read -u 3 -r svc; do + var_prefix="$(echo "$svc" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + echo "$var_prefix.label $svc" + print_warning "$var_prefix" + print_critical "$var_prefix" + echo "$var_prefix.info Number of event occurrences for $svc" + done 3< <(IFS=$'\n'; echo "${services[*]}") +} + + +function fetch() { + local curstate n svcnm varnm service svc svc_counter_var logbinding logfile lognm logmatch prvlines curlines matches extinfo_var + local nextstate=() + + # Load state + touch "$MUNIN_STATEFILE" + curstate="$(cat "$MUNIN_STATEFILE")" + + # Set service counters to 0 and set any logbindings that aren't yet set + while read -u 3 -r svc; do + svcnm="$(echo "$svc" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + typeset "${svcnm}_total=0" + + varnm="${svcnm}_logbinding" + if [ -z "$(echo "$curstate" | grep "^${varnm}=" | cut -f 2 -d "=")" ]; then + typeset "$varnm=$svc" + fi + done 3< <(IFS=$'\n'; echo "${services[*]}") + + n=0 + while read -u 3 -r logfile; do + # Handling trailing newline + if [ -z "$logfile" ]; then + continue + fi + + # Make sure the logfile exists + if [ ! -e "$logfile" ]; then + >&2 echo "Logfile '$logfile' doesn't exist. Skipping." 
+ n=$((n+1)) + continue + fi + + # Find which service this logfile is associated with + service= + while read -u 4 -r svc; do + logbinding="$(echo "$svc" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')_logbinding" + if echo "$logfile" | grep -Eq "${!logbinding}"; then + service="$svc" + break + fi + done 4< <(IFS=$'\n'; echo "${services[*]}") + + # Skip this log if it's not associated with any service + if [ -z "$service" ]; then + >&2 echo "W: No service associated with log $logfile. Skipping...." + continue + fi + + # Get shell-compatible names for service and logfile + svcnm="$(echo "$service" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + lognm="$(echo "$logfile" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + + # Get previous line count to determine whether or not the file may have been rotated (defaulting to 0) + prvlines="$(echo "$curstate" | grep "^${lognm}_lines=" | cut -f 2 -d "=")" + prvlines="${prvlines:-0}" + + # Get the current number of lines in the file (defaulting to 0 on error) + curlines="$(wc -l < "$logfile")" + + # If the current line count is less than the previous line count, we've probably rotated. + # Reset to 0. + if [ "$curlines" -lt "$prvlines" ]; then + prvlines=0 + else + prvlines=$((prvlines + 1)) + fi + + # Get incidents starting at the line after the last line we've seen + logmatch="${LOGFILEMAP[$n]}_regex" + matches="$(tail -n +"$prvlines" "$logfile" | grep -Ec "${!logmatch}" || true)" + + # If there were matches, aggregate them and add this log to the extinfo for the service + if [ "$matches" -gt 0 ]; then + # Aggregate and add to the correct service counter + svc_counter_var="${svcnm}_total" + matches=$((matches + ${!svc_counter_var})) + typeset "$svc_counter_var=$matches" + + # Add this log to extinfo for service + extinfo_var="${svcnm}_extinfo" + typeset "$extinfo_var=${!extinfo_var}$logfile, " + fi + + # Push onto next state + nextstate+=("${lognm}_lines=$curlines") + + n=$((n+1)) + done 3< <(echo "$LOGFILES") + + # Write state to munin statefile + (IFS=$'\n'; echo "${nextstate[*]}" > "$MUNIN_STATEFILE") + + # Now echo values + while read -u 3 -r svc; do + svcnm="$(echo "$svc" | sed -r 's/^[^a-zA-Z]+//g' | sed -r 's/[^a-zA-Z0-9]+/_/g')" + svc_counter_var="${svcnm}_total" + extinfo_var="${svcnm}_extinfo" + echo "${svcnm}.value ${!svc_counter_var}" + echo "${svcnm}.extinfo ${!extinfo_var}" + done 3< <(IFS=$'\n'; echo "${services[*]}") + + return 0 +} + + +case "$1" in + config) config ;; + *) fetch ;; +esac diff --git a/plugins/luftdaten/feinstaubsensor b/plugins/luftdaten/feinstaubsensor new file mode 100755 index 00000000..e9732bfb --- /dev/null +++ b/plugins/luftdaten/feinstaubsensor @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +""" + +=head1 NAME + +feinstaubsensor - Plugin to monitor one or more environmental sensors + + +=head1 APPLICABLE SYSTEMS + +The "Feinstaubsensor" was developed by the OK Lab Stuttgart and is part of the +Citizen Science Project "luftdaten.info" (http://luftdaten.info). + +Data is retrieved via HTTP requests from the sensors itself. + + +=head1 CONFIGURATION + +Place a configuration entry somewhere below /etc/munin/plugin-conf.d/: + + [feinstaubsensor] + env.sensor_hosts foo=192.168.1.4 [fe80::1:2:3:4%eth0] bar=sensor2.lan + +The environment variable is a space separated list of . +Each can be either a or a combination of label and (separated by the +character "="). +A may be an IPv4 address, an IPv6 address (enclosed in square brackets) or a name to be +resolved via DNS. 
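Since the plugin simply requests each sensor's JSON endpoint over HTTP (see get_sensor_data() further down, which fetches /data.json), reachability is easy to verify by hand. A quick manual check, assuming a sensor at 192.168.1.4 as in the example above:

    curl http://192.168.1.4/data.json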
+ +Examples for : + * 192.168.1.4 + * foo=192.168.1.4 + * [fe80::1a:2b:3c:cafe] + * bar=[fe80::1a:2b:3c:cafe] + * feinstaubsensor-12345.local + * baz=feinstaubsensor-12345.local + + +=head1 AUTHOR + + Lars Kruse + + +=head1 LICENSE + + GPLv3 + + +=head1 MAGIC MARKERS + + #%# family=manual + +""" + +import collections +import functools +import json +import os +import re +import sys +import urllib.request + + +graphs = [ + { + "name": "wireless_signal", + "graph_title": "Feinstaub Wifi Signal", + "graph_vlabel": "%", + "graph_args": "-l 0", + "graph_info": "Wifi signal strength", + "api_value_name": "signal", + "value_type": "GAUGE", + }, { + "name": "feinstaub_samples", + "graph_title": "Feinstaub Sample Count", + "graph_vlabel": "#", + "graph_info": "Number of samples since bootup", + "api_value_name": "samples", + "value_type": "DERIVE", + }, { + "name": "feinstaub_humidity", + "graph_title": "Feinstaub Humidity", + "graph_vlabel": "% humidity", + "graph_info": "Weather information: air humidity", + "api_value_name": "humidity", + "value_type": "GAUGE", + }, { + "name": "feinstaub_temperature", + "graph_title": "Feinstaub Temperature", + "graph_vlabel": "°C", + "graph_info": "Weather information: temperature", + "api_value_name": "temperature", + "value_type": "GAUGE", + }, { + "name": "feinstaub_particles_pm10", + "graph_title": "Feinstaub Particle Measurement P10", + "graph_vlabel": "µg / m³", + "graph_info": "Concentration of particles with a size between 2.5µm and 10µm", + "api_value_name": "SDS_P1", + "value_type": "GAUGE", + }, { + "name": "feinstaub_particles_pm2_5", + "graph_title": "Feinstaub Particle Measurement P2.5", + "graph_vlabel": "µg / m³", + "graph_info": "Concentration of particles with a size up to 2.5µm", + "api_value_name": "SDS_P2", + "value_type": "GAUGE", + }] + + +SensorHost = collections.namedtuple("SensorHost", ("host", "label", "fieldname")) + + +def clean_fieldname(text): + if text == "root": + # "root" is a magic (forbidden) word + return "_root" + else: + return re.sub(r"(^[^A-Za-z_]|[^A-Za-z0-9_])", "_", text) + + +def parse_sensor_hosts_from_description(hosts_description): + """ parse sensor list from the environment variable 'sensor_hosts' and retrieve their data """ + sensors = [] + for token in hosts_description.split(): + if "=" in token: + label, host = token.strip().split("=", 1) + else: + host = token.strip() + label = host + fieldname = clean_fieldname("value_" + host) + sensors.append(SensorHost(host, label, fieldname)) + sensors.sort(key=lambda item: item.fieldname) + return sensors + + +@functools.lru_cache() +def get_sensor_data(host): + """ request the data from a sensor and return a dict (value_type -> value) + + The result is cached - thus we do not need to take care for efficiency. 
+ + Example dataset returned by the sensor: + {"software_version": "NRZ-2017-099", "age":"88", "sensordatavalues":[ + {"value_type":"SDS_P1","value":"27.37"},{"value_type":"SDS_P2","value":"13.53"}, + {"value_type":"temperature","value":"23.70"},{"value_type":"humidity","value":"69.20"}, + {"value_type":"samples","value":"626964"},{"value_type":"min_micro","value":"225"}, + {"value_type":"max_micro","value":"887641"},{"value_type":"signal","value":"-47"}]} + + """ + try: + with urllib.request.urlopen("http://{}/data.json".format(host)) as request: + body = request.read() + except IOError as exc: + print("Failed to retrieve data from '{}': {}".format(host, exc), file=sys.stderr) + return None + try: + data = json.loads(body.decode("utf-8")) + except ValueError as exc: + print("Failed to parse data from '{}': {}".format(host, exc), file=sys.stderr) + return None + return {item["value_type"]: item["value"] for item in data["sensordatavalues"]} + + +def print_graph_section(graph_description, hosts, include_config, include_values): + print("multigraph {}".format(graph_description["name"])) + if include_config: + # graph configuration + print("graph_category sensors") + for key in ("graph_title", "graph_vlabel", "graph_args", "graph_info"): + if key in graph_description: + print("{} {}".format(key, graph_description[key])) + for host_info in hosts: + print("{}.label {}".format(host_info.fieldname, host_info.label)) + print("{}.type {}".format(host_info.fieldname, graph_description["value_type"])) + if include_values: + for host_info in hosts: + # We cannot distinguish between fields that are not supported by the sensor (most are + # optional) and missing data. Thus we cannot handle online/offline sensor data fields, + # too. + data = get_sensor_data(host_info.host) + if data is not None: + value = data.get(graph_description["api_value_name"]) + if value is not None: + print("{}.value {}".format(host_info.fieldname, value)) + print() + + +action = sys.argv[1] if (len(sys.argv) > 1) else "" +sensor_hosts = parse_sensor_hosts_from_description(os.getenv("sensor_hosts", "")) +if not sensor_hosts: + print("ERROR: undefined or empty environment variable 'sensor_hosts'.", file=sys.stderr) + sys.exit(1) + + +if action == "config": + is_dirty_config = (os.getenv("MUNIN_CAP_DIRTYCONFIG") == "1") + for graph in graphs: + print_graph_section(graph, sensor_hosts, True, is_dirty_config) +elif action == "": + for graph in graphs: + print_graph_section(graph, sensor_hosts, False, True) +else: + print("ERROR: unsupported action requested ('{}')".format(action), file=sys.stderr) + sys.exit(2) diff --git a/plugins/lustre/lustre_df b/plugins/lustre/lustre_df index 86913419..551ecae2 100755 --- a/plugins/lustre/lustre_df +++ b/plugins/lustre/lustre_df @@ -3,7 +3,7 @@ =head1 NAME -lustre_df - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's +lustre_df - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's usage in percents =head1 CONFIGURATION diff --git a/plugins/lustre/lustre_df_abs b/plugins/lustre/lustre_df_abs index fb9bc400..dbc5c06b 100755 --- a/plugins/lustre/lustre_df_abs +++ b/plugins/lustre/lustre_df_abs @@ -1,9 +1,9 @@ -#!/usr/bin/perl +#!/usr/bin/perl # -*- perl -*- =head1 NAME -lustre_df_abs - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's +lustre_df_abs - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's usage in TB and/or G =head1 CONFIGURATION @@ -49,8 +49,8 @@ if ($ARGV[0] and $ARGV[0] eq "config") { print "graph_title 
Lustre cluster storage objects usage in TB\n"; print "graph_args --base 1024 --lower-limit 0\n"; print "graph_vlabel TB\n"; - print "graph_category fs\n"; - + print "graph_category fs\n"; + &print_labels; exit 0; @@ -65,7 +65,7 @@ sub print_labels { my $name = $2.$3; print $name.".label ", $name, "\n"; print $name.".min 0\n"; - print $name.".cdef ", $name,",1024,*\n"; + print $name.".cdef ", $name,",1024,*\n"; } } diff --git a/plugins/lustre/lustre_df_absfree b/plugins/lustre/lustre_df_absfree index 3681cb14..631c6b23 100755 --- a/plugins/lustre/lustre_df_absfree +++ b/plugins/lustre/lustre_df_absfree @@ -1,9 +1,9 @@ -#!/usr/bin/perl +#!/usr/bin/perl # -*- perl -*- =head1 NAME -lustre_df_free - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's +lustre_df_free - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's free space in TB and/or G =head1 CONFIGURATION @@ -49,8 +49,8 @@ if ($ARGV[0] and $ARGV[0] eq "config") { print "graph_title Lustre cluster storage objects free space in TB\n"; print "graph_args --base 1024 --lower-limit 0\n"; print "graph_vlabel TB\n"; - print "graph_category fs\n"; - + print "graph_category fs\n"; + &print_labels; exit 0; @@ -65,7 +65,7 @@ sub print_labels { my $name = $2.$3; print $name.".label ", $name, "\n"; print $name.".min 0\n"; - print $name.".cdef ", $name,",1024,*\n"; + print $name.".cdef ", $name,",1024,*\n"; } } diff --git a/plugins/lustre/lustre_df_inodes b/plugins/lustre/lustre_df_inodes index 35626187..7cc9a25e 100755 --- a/plugins/lustre/lustre_df_inodes +++ b/plugins/lustre/lustre_df_inodes @@ -3,7 +3,7 @@ =head1 NAME -lustre_df_inodes - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's +lustre_df_inodes - Plugin to monitor Lustre 1.8.x (cluster FS) storage objects MDT,OST's usage inodes in percents =head1 CONFIGURATION diff --git a/plugins/lxc/lxc_cpu b/plugins/lxc/lxc_cpu deleted file mode 100755 index 87947c74..00000000 --- a/plugins/lxc/lxc_cpu +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash -# -*- sh -*- - -: << =cut - -=head1 NAME - -lxc_cpu - Plugin to monitor LXC CPU usage - -=head1 CONFIGURATION - - [lxc_*] - user root - -=head1 INTERPRETATION - -This plugin needs root privilege. - -=head1 AUTHOR - -vajtsz vajtsz@gmail.com -mitty mitty@mitty.jp - -=head1 LICENSE - -Unknown license - -=head1 MAGIC MARKERS - - #%# family=auto - #%# capabilities=autoconf - -=cut - -. 
$MUNIN_LIBDIR/plugins/plugin.sh - -guest_names=`lxc-ls | sort -u` -for guest in $guest_names; do - if lxc-info -n $guest 2>&1 | grep -qs RUNNING ; then - active="$active $guest" - fi -done -guest_names="$active" - - - -f_comm='lxc-cgroup ' - -if [ "$1" = "autoconf" ]; then - if [ -r /proc/stat ]; then - echo yes - exit 0 - else - echo "no (no /proc/stat)" - exit 0 - fi -fi - -if [ "$1" = "config" ]; then - - echo 'graph_title CPU Usage ' - echo 'graph_args -l 0 --base 1000' - echo 'graph_vlabel USER_HZ' - echo 'graph_category cpu' - - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - echo 'cpu_user_'$guest'.label '$guest_name': User' - echo 'cpu_user_'$guest'.type DERIVE' - echo 'cpu_user_'$guest'.min 0' - echo 'cpu_system_'$guest'.label '$guest_name': System' - echo 'cpu_system_'$guest'.type DERIVE' - echo 'cpu_system_'$guest'.min 0' - done - exit 0 -fi - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - - tmp_g=`$f_comm -n $guest_name cpuacct.stat | grep user` - tmp_v=`echo $tmp_g | awk '{print($2)}'` - echo 'cpu_user_'$guest'.value '$tmp_v - - tmp_g=`$f_comm -n $guest_name cpuacct.stat | grep system` - tmp_v=`echo $tmp_g | awk '{print($2)}'` - echo 'cpu_system_'$guest'.value '$tmp_v - - done - - diff --git a/plugins/lxc/lxc_cpu_time b/plugins/lxc/lxc_cpu_time deleted file mode 100755 index 22a5e1f8..00000000 --- a/plugins/lxc/lxc_cpu_time +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -# -*- sh -*- - -: << =cut - -=head1 NAME - -lxc_cpu_time - Plugin to monitor LXC CPU time usage - -=head1 CONFIGURATION - - [lxc_*] - user root - -=head1 INTERPRETATION - -This plugin needs root privilege. - -=head1 AUTHOR - -vajtsz vajtsz@gmail.com -mitty mitty@mitty.jp - -=head1 LICENSE - -Unknown license - -=head1 MAGIC MARKERS - - #%# family=auto - #%# capabilities=autoconf - -=cut - -. $MUNIN_LIBDIR/plugins/plugin.sh - -guest_names=`lxc-ls | sort -u` -for guest in $guest_names; do - if lxc-info -n $guest 2>&1 | grep -qs RUNNING ; then - active="$active $guest" - fi -done -guest_names="$active" - - -f_comm='lxc-cgroup ' - -if [ "$1" = "autoconf" ]; then - if [ -r /proc/stat ]; then - echo yes - exit 0 - else - echo "no (no /proc/stat)" - exit 0 - fi -fi - -if [ "$1" = "config" ]; then - - echo 'graph_title CPU time ' - echo 'graph_args -l 0 --base 1000' - echo 'graph_vlabel nanosec' - echo 'graph_category cpu' - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - echo 'cpu_time_'$guest'.label '$guest_name': CPU time' - echo 'cpu_time_'$guest'.type DERIVE' - echo 'cpu_time_'$guest'.min 0' - done - exit 0 -fi - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - tmp_g=`$f_comm -n $guest_name cpuacct.usage ` - echo 'cpu_time_'$guest'.value '$tmp_g - done - diff --git a/plugins/lxc/lxc_guests b/plugins/lxc/lxc_guests new file mode 100755 index 00000000..97608d4d --- /dev/null +++ b/plugins/lxc/lxc_guests @@ -0,0 +1,416 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 NAME + +lxc_guests - collect statistics about containers virtualized via LXC + +=head1 CONFIGURATION + + [lxc_guests] + user root + + # The memory usage of containers are by default drawn as stacked area + # charts. Alternatively a non-stacked graph with lines can be configured. 
+ # Default: true + #env.ram_display_stacked true + + # lxc container path, default below + #env.lxcpath /var/lib/lxc + + # exclude the following containers + # (default none excluded) + #env.exclude container1 container2 + + # path where tasks sysfs files are stored, + # set this if the various attempts in the + # code don't work + # (default none) + #env.cgrouppath /sys/fs/cgroup/cpuacct/lxc/ + +=head1 INTERPRETATION + +This plugin needs root privilege. + +This plugin has been tested with lxc 3 and +lx2 (on Debian buster and Debian jessie, +respectively). + +For the network graphs to work, you need +to have in every container's config file +a line defining the virtual network interface +path (else lxc will use a random name at +each container's start); see the lxc_netdev() +function below. + +If using lxc 2, make sure you do not have cruft +in your container config files, you can test +it with: + lxc-cgroup -o /dev/stdout -l INFO -n 104 cpuacct.usage +-- with 104 a valid lxc instance), if you +get a warning, fix the config file. + +For the logins graph, the "users" command is required in each +container. + +Tested on Debian buster and Debian jessie. + + +=head1 AUTHOR + +vajtsz vajtsz@gmail.com +mitty mitty@mitty.jp +alphanet schaefer@alphanet.ch (many changes and multigraph) +Lars Kruse + +=head1 LICENSE + +2-clause BSD License +or GPLv3 license or later, at your option + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + +set -eu + + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + + +lxcpath=${lxcpath:-/var/lib/lxc} +# containers to be ignored +exclude=${exclude:-} +ram_display_stacked=${ram_display_stacked:-true} +# try to guess the location, if empty +cgrouppath=${cgrouppath:-} + + +# --- FUNCTIONS + +get_active_guests() { + local excludes="$1" + local guest_name + for guest_name in $(lxc-ls) + do + # handle optional exclude list in $1 + if ! 
echo "$excludes" | grep -qwF "$guest_name"; then + if lxc-info -n "$guest_name" --state 2>/dev/null | grep -qw RUNNING; then + echo "$guest_name" + fi + fi + done +} + + +get_lxc_cgroup_info() { + local guest_name="$1" + local field="$2" + # lxc3 (lxc < 3: may output some warnings if there is cruft in your config dir) + lxc-cgroup -o /dev/stdout -l INFO -n "$guest_name" "$field" | sed 's/^.*lxc_cgroup.c:main:[0-9][0-9]* - //' +} + + +lxc_netdev() { + local guest_name="$1" + + if [ -f "$lxcpath/$guest_name/config" ]; then + # lxc 3 vs < 3 + (grep -E '^lxc.net.0.veth.pair' "$lxcpath/$guest_name/config" 2>/dev/null \ + || grep -E '^lxc.network.veth.pair' "$lxcpath/$guest_name/config" + ) | awk '{print $NF;}' + fi +} + + +# find proper sysfs and count it +# Debian 6.0: /sys/fs/cgroup//tasks +# Ubuntu 12.04 with fstab: /sys/fs/cgroup/lxc//tasks +# Ubuntu 12.04 with cgroup-lite: /sys/fs/cgroup/cpuacct/lxc//tasks +# Ubuntu 12.04 with cgroup-bin: /sys/fs/cgroup/cpuacct/sysdefault/lxc//tasks +# Ubuntu 14.04 /sys/fs/cgroup/systemd/lxc//tasks +# and with cgmanager on jessie +lxc_count_processes () { + local guest_name="$1" + local SYSFS + + [ -z "$guest_name" ] && return 0 + + if [ -n "$cgrouppath" ]; then + SYSFS="$cgrouppath/$guest_name/tasks" + if [ -e "$SYSFS" ]; then + wc -l <"$SYSFS" + return + fi + fi + + for SYSFS in \ + "/sys/fs/cgroup/$guest_name/tasks" \ + "/sys/fs/cgroup/lxc/$guest_name/tasks" \ + "/sys/fs/cgroup/cpuacct/lxc/$guest_name/tasks" \ + "/sys/fs/cgroup/systemd/lxc/$guest_name/tasks" \ + "/sys/fs/cgroup/cpuacct/sysdefault/lxc/$guest_name/tasks" + do + if [ -e "$SYSFS" ]; then + wc -l <"$SYSFS" + return + fi + done + + if [ -e /usr/bin/cgm ]; then + cgm getvalue cpu "lxc/$guest_name" tasks 2>/dev/null | wc -l + else + get_lxc_cgroup_info "$guest_name" "tasks" | wc -l + fi +} + + +# change the first character of a string to upper case +title_case() { + local text="$1" + printf "%s%s" "$(echo "$text" | cut -c 1 | tr "[:lower:]" "[:upper:]")" "$(echo "$text" | cut -c 2-)" +} + + +do_autoconf() { + if [ ! -r /proc/net/dev ]; then + echo "no (/proc/net/dev cannot be read)" + elif [ ! -e "$lxcpath" ]; then + echo "no ($lxcpath is not present)" + elif [ -z "$(which lxc-ls)" ]; then + echo "no ('lxc-ls' is not available in PATH)" + else + echo yes + fi +} + + +do_config() { + local active_guests guest_name draw_style + active_guests=$(get_active_guests "$exclude") + + cat <&2 "Invalid action requested (none of: autoconf / config / '')" + exit 1 +esac diff --git a/plugins/lxc/lxc_net b/plugins/lxc/lxc_net deleted file mode 100755 index 872147a7..00000000 --- a/plugins/lxc/lxc_net +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/sh - -: << =cut - -=head1 NAME - -lxc_net - Munin plugin to graph traffic of active LXC containers. - -=head1 APPLICABLE SYSTEMS - -LXC container with "lxc.network.type=veth" and "lxc.network.veth.pair" settings. - -=head1 CONFIGURATION - -env.lxcpath - Set the path where LXC containers are stored, default: /var/lib/lxc -env.exclude - Removing containers from graphs, default: empty - - [lxc_net] - env.lxcpath /var/lib/lxc - env.exclude container1 container2 - -=head1 INTERPRETATION - -This plugin reads a "lxc.network.veth.pair" setting from "config" file of each container, -because lxc-start command creates a random named veth device without the setting. 
- -If your xen config (/var/lib/lxc/GUEST_NAME/config does not contain this parameter, -then you have to fill it, because if every guest restart generate new device name, then the graph will be useless -( example config : lxc.network.veth.pair = vethsamba ) - -=head1 AUTHOR - -mitty mitty@mitty.jp - -=head1 LICENSE - -2-clause BSD License - -=head1 MAGIC MARKERS - - #%# familiy=auto - #%# capabilities=autoconf - -=cut - -. $MUNIN_LIBDIR/plugins/plugin.sh - -lxcpath=${lxcpath:-/var/lib/lxc} - -if [ "$1" = "autoconf" ]; then - if [ ! -r /proc/net/dev ]; then - echo "no (/proc/net/dev cannot be read)" - exit 0 - fi - if [ ! -e "$lxcpath" ]; then - echo "no ($lxcdir is not present)" - exit 0 - fi - - echo yes -fi - -actives="" -for guest in `ls $lxcpath`; do - if [ `echo $exclude | grep -c "\b$guest\b"` -eq 1 ]; then - continue; - fi - if [ -f "$lxcpath/$guest/config" ]; then - devices=`grep '^lxc\.network\.veth\.pair[ \t]*=[ \t]*' $lxcpath/$guest/config | \ - awk '{ split($0, a, /=/); gsub(/[ \t]/, "", a[2]); print a[2]; }'` - if [ -n "$devices" ]; then - for device in $devices; do - device_re=`echo $device | sed -e 's/\./\\\\./g'` - if [ `grep -c "^ *$device_re:" /proc/net/dev` -eq 1 ]; then - actives="$actives $guest" - eval "dev_$(clean_fieldname $guest)=$device" - fi - done - fi - fi -done - -if [ "$1" = "config" ]; then - echo "graph_title Network traffic" - echo "graph_args --base 1000" - echo "graph_vlabel bits in (-) / out (+) per ${graph_period}" - echo "graph_category network" - echo "graph_info This graph shows the traffic of active LXC containers." - - for guestname in $actives; do - guest=$(clean_fieldname $guestname) - device=$(eval 'echo $dev_'$guest) - bps="U" - if [ -r /sys/class/net/$device/speed ]; then - bps=$(cat /sys/class/net/$device/speed) - bps=$(($bps * 1000 * 1000)) - fi - - echo "${guest}_down.label $guestname" - echo "${guest}_down.type DERIVE" - echo "${guest}_down.graph no" - echo "${guest}_down.cdef ${guest}_down,8,*" - echo "${guest}_down.min 0" - echo "${guest}_down.max $bps" - echo "${guest}_up.label $guestname" - echo "${guest}_up.type DERIVE" - echo "${guest}_up.negative ${guest}_down" - echo "${guest}_up.cdef ${guest}_up,8,*" - echo "${guest}_up.min 0" - echo "${guest}_up.max $bps" - done - exit 0 -fi - - -for guest in $actives; do - guest=$(clean_fieldname $guest) - device=$(eval 'echo $dev_'$guest) - device_re=`echo $device | sed -e 's/\./\\\\./g'` - line=`grep "^ *$device_re:" /proc/net/dev` - echo -n "${guest}_down.value " - echo $line | awk '{ - split($0, a, /: */); - print $2; - }' - echo -n "${guest}_up.value " - echo $line | awk '{ - split($0, a, /: */); - print $10; - }' -done diff --git a/plugins/lxc/lxc_proc b/plugins/lxc/lxc_proc deleted file mode 100755 index c2e3b02d..00000000 --- a/plugins/lxc/lxc_proc +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# -*- sh -*- - -: << =cut - -=head1 NAME - -lxc_proc - Plugin to monitor LXC Processes count - -=head1 CONFIGURATION - -env.cgrouppath - Set the path where 'tasks' sysfs files are stored, default: empty - - [lxc_proc] - user root - env.cgrouppath /sys/fs/cgroup/cpuacct/lxc/ - -=head1 INTERPRETATION - -This plugin needs root privilege. - -=head1 AUTHOR - -vajtsz vajtsz@gmail.com - -=head1 LICENSE - -Unknown license - -=head1 MAGIC MARKERS - - #%# family=auto - #%# capabilities=autoconf - -=cut - -. 
$MUNIN_LIBDIR/plugins/plugin.sh - -## find proper sysfs and count it -# Debian 6.0: /sys/fs/cgroup//tasks -# Ubuntu 12.04 with fstab: /sys/fs/cgroup/lxc//tasks -# Ubuntu 12.04 with cgroup-lite: /sys/fs/cgroup/cpuacct/lxc//tasks -# Ubuntu 12.04 with cgroup-bin: /sys/fs/cgroup/cpuacct/sysdefault/lxc//tasks -count_processes () { - [ -z "$1" ] && return 0 - - if [ -n "$cgrouppath" ]; then - SYSFS=$cgrouppath/$1/tasks - if [ -e $SYSFS ]; then - return `wc -l < $SYSFS` - fi - fi - - for SYSFS in \ - /sys/fs/cgroup/$1/tasks \ - /sys/fs/cgroup/lxc/$1/tasks \ - /sys/fs/cgroup/cpuacct/lxc/$1/tasks \ - /sys/fs/cgroup/cpuacct/sysdefault/lxc/$1/tasks \ - ; do - if [ -e $SYSFS ]; then - return `wc -l < $SYSFS` - fi - done - - return 0 -} - - -guest_names=`lxc-ls | sort -u` -for guest in $guest_names; do - if lxc-info -n $guest 2>&1 | grep -qs RUNNING ; then - active="$active $guest" - fi -done -guest_names="$active" - - - -f_comm='lxc-cgroup ' - -if [ "$1" = "autoconf" ]; then - if [ -r /proc/stat ]; then - echo yes - exit 0 - else - echo "no (no /proc/stat)" - exit 0 - fi -fi - -if [ "$1" = "config" ]; then - - echo 'graph_title Processes ' - echo 'graph_args -l 0 --base 1000' - echo 'graph_vlabel Number of processes' - echo 'graph_category processes' - - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - echo 'lxc_proc_'$guest'.label '$guest_name': processes' - echo 'lxc_proc_'$guest'.type GAUGE' - echo 'lxc_proc_'$guest'.min 0' - done - exit 0 -fi - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - - count_processes $guest_name - tmp_g=$? - if [ $tmp_g -eq 0 ]; then - tmp_g=`$f_comm -n $guest_name tasks | wc -l` - fi - echo 'lxc_proc_'$guest'.value '$tmp_g - - - done - - diff --git a/plugins/lxc/lxc_ram b/plugins/lxc/lxc_ram deleted file mode 100755 index db770778..00000000 --- a/plugins/lxc/lxc_ram +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# -*- sh -*- - -: << =cut - -=head1 NAME - -lxc_ram - Plugin to monitor LXC memory usage. - -=head1 CONFIGURATION - - [lxc_*] - user root - -=head1 INTERPRETATION - -This plugin needs root privilege. - -=head1 AUTHOR - -vajtsz vajtsz@gmail.com -mitty mitty@mitty.jp - -=head1 LICENSE - -Unknown license - -=head1 MAGIC MARKERS - - #%# family=auto - #%# capabilities=autoconf - -=cut - -. 
$MUNIN_LIBDIR/plugins/plugin.sh - -guest_names=`lxc-ls | sort -u` -for guest in $guest_names; do - if lxc-info -n $guest 2>&1 | grep -qs RUNNING ; then - active="$active $guest" - fi -done -guest_names="$active" - - -f_comm='lxc-cgroup ' - -if [ "$1" = "autoconf" ]; then - if [ -r /proc/stat ]; then - echo yes - exit 0 - else - echo "no (no /proc/stat)" - exit 0 - fi -fi - -if [ "$1" = "config" ]; then - - echo 'graph_title Memory ' - echo 'graph_args -l 0 --base 1024' - echo 'graph_vlabel byte' - echo 'graph_category memory' - - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - - echo 'mem_usage_'$guest'.label '$guest_name': Mem usage' - echo 'mem_usage_'$guest'.type GAUGE' - echo 'mem_cache_'$guest'.label '$guest_name': Cache' - echo 'mem_cache_'$guest'.type GAUGE' - echo 'mem_active_'$guest'.label '$guest_name': Active' - echo 'mem_active_'$guest'.type GAUGE' - echo 'mem_inactive_'$guest'.label '$guest_name': Inactive' - echo 'mem_inactive_'$guest'.type GAUGE' - - done - - exit 0 -fi - - - for guest_name in $guest_names; - do - guest="$(clean_fieldname $guest_name)" - - tmp_v=`$f_comm -n $guest_name memory.usage_in_bytes` - echo 'mem_usage_'$guest'.value '$tmp_v - - tmp_g=`$f_comm -n $guest_name memory.stat | grep total_cache` - tmp_v=`echo $tmp_g | awk '{print($2)}'` - echo 'mem_cache_'$guest'.value '$tmp_v - - tmp_g=`$f_comm -n $guest_name memory.stat | grep total_active_anon` - tmp_v=`echo $tmp_g | awk '{print($2)}'` - echo 'mem_active_'$guest'.value '$tmp_v - - tmp_g=`$f_comm -n $guest_name memory.stat | grep total_inactive_anon` - tmp_v=`echo $tmp_g | awk '{print($2)}'` - echo 'mem_inactive_'$guest'.value '$tmp_v - - done diff --git a/plugins/lxd/lxd_mem b/plugins/lxd/lxd_mem index f81a767d..23fb40f9 100755 --- a/plugins/lxd/lxd_mem +++ b/plugins/lxd/lxd_mem @@ -10,7 +10,7 @@ try: except: HAS_LIB=False errors.append("no pylxd module") - + c=None HAS_ACCESS=True try: diff --git a/plugins/mail/eoc_subscribers_count b/plugins/mail/eoc_subscribers_count index 116d0183..e5c2097b 100755 --- a/plugins/mail/eoc_subscribers_count +++ b/plugins/mail/eoc_subscribers_count @@ -17,22 +17,21 @@ #%# capabilities=autoconf if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi -LISTNAME=`basename $0 | sed 's/^eoc_subscribers_count_//g' | tr '_' '@'` +LISTNAME=$(basename "$0" | sed 's/^eoc_subscribers_count_//g' | tr '_' '@') if [ "$1" = "config" ]; then - echo 'graph_title Number of subscribers to '${LISTNAME} + echo "graph_title Number of subscribers to ${LISTNAME}" echo 'graph_vlabel subscribers' echo 'graph_category mailinglist' echo 'subscribers.label subscribers' echo 'subscribers.draw AREA' - echo 'subscribers.info Number of subscribers to the list' + echo 'subscribers.info Number of subscribers to the list' exit 0 fi -echo -n subscribers.value\ -enemies-of-carlotta --name ${LISTNAME} --list | wc -l +printf "subscribers.value %s" "$(enemies-of-carlotta --name "${LISTNAME}" --list | wc -l)" diff --git a/plugins/mail/imap_bandwidth b/plugins/mail/imap_bandwidth index be8084b1..18060355 100755 --- a/plugins/mail/imap_bandwidth +++ b/plugins/mail/imap_bandwidth @@ -116,7 +116,7 @@ fi if [ "$1" = "config" ]; then echo 'graph_title IMAP bandwidth' echo 'graph_vlabel to (+) / from (-) server [bit/s]' - echo 'graph_category network' + echo 'graph_category mail' for item in $SERVERS; do key="$(echo "$item" | cut -f 1 -d =)" clean_name="$(clean_fieldname "$key")" diff --git a/plugins/mail/mail_connections b/plugins/mail/mail_connections index 0a59cbef..5c71ac87 
100755 --- a/plugins/mail/mail_connections +++ b/plugins/mail/mail_connections @@ -29,9 +29,9 @@ fi if [ "$1" = "config" ]; then cat < + +=head1 LICENSE + +GPLv2 + +=cut + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +queuedir=${queuedir:-/var/spool/nullmailer/queue} +errordir=${errordir:-/var/spool/nullmailer/failed} + +failed_warning=${failed_warning:-0:0} + +case $1 in + autoconf) + if command -v nullmailer-queue >/dev/null 2>/dev/null; then + [ -r "$queuedir" ] && echo yes || echo "no (queue dir not readable)" + else + echo "no (nullmailer not installed)" + fi + ;; + + config) + cat <<-EOF +graph_args -l 0 +graph_title Nullmailer queue +graph_vlabel emails +graph_total total +graph_category mail + +queue.label queued +queue.draw AREASTACK +queue.info Number of emails currently in the queue + +failed.label failed +failed.draw AREASTACK +failed.info Number of emails that have permanently failed to send +EOF + print_warning queue + print_critical queue + print_warning failed + print_critical failed + ;; + + *) + echo "queue.value $(find "$queuedir" -type f | wc -l)" + # Failed does not exist until there has been a failure, so mute the "file not found" + echo "failed.value $(find "$errordir" -type f 2> /dev/null | wc -l)" + ;; +esac diff --git a/plugins/mail/postfwd-rbl b/plugins/mail/postfwd-rbl old mode 100644 new mode 100755 index c2278f6c..e875e160 --- a/plugins/mail/postfwd-rbl +++ b/plugins/mail/postfwd-rbl @@ -28,7 +28,7 @@ fi if [ "$1" = "config" ]; then echo 'graph_title Postfwd' - echo 'graph_category fw' + echo 'graph_category spamfilter' echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel Postfwd statistics' echo 'spamhaus.label Listed on Spamhaus.org' diff --git a/plugins/mail/procmail_ b/plugins/mail/procmail_ index 1838c0b3..65f77ce8 100755 --- a/plugins/mail/procmail_ +++ b/plugins/mail/procmail_ @@ -35,7 +35,7 @@ sub config { print "graph_title Destination folders for Postfix\n", "graph_vlabel Received mails\n", - "graph_category Mail\n", + "graph_category mail\n", "graph_info Gives you the total mails stored by Postfix by folder\n"; print "$_.label $_\n" foreach sort keys %{$state->{folders}}; } @@ -85,10 +85,10 @@ sub get_log_size { sub get_log_offset { my ($size); - # The offset is expressed as the number of lines to skip. We get to that + # The offset is expressed as the number of lines to skip. We get to that # point getting the total log size (get_log_size) and using tail for the - # difference. If the offset is larger than the file itself, we get it - # whole (it might have just been rotated). + # difference. If the offset is larger than the file itself, we get it + # whole (it might have just been rotated). 
$size = get_log_size(); $state->{offset} ||= 0; diff --git a/plugins/mailman/mailman-queue-check b/plugins/mailman/mailman-queue-check index 3a9deab3..86a49773 100755 --- a/plugins/mailman/mailman-queue-check +++ b/plugins/mailman/mailman-queue-check @@ -56,5 +56,5 @@ echo "news.value ${array[4]}" echo "out.value ${array[5]}" echo "retry.value ${array[6]}" exit 0 - + diff --git a/plugins/mailman/mailman_subscribers b/plugins/mailman/mailman_subscribers index a5809d4f..9cf7db41 100755 --- a/plugins/mailman/mailman_subscribers +++ b/plugins/mailman/mailman_subscribers @@ -30,7 +30,7 @@ # # to /etc/munin/plugin-conf.d/munin-node # -# Magic markers (optinal - used by munin-config and some installation +# Magic markers (optional - used by munin-config and some installation # scripts): # #%# family=manual @@ -61,7 +61,7 @@ if ($ARGV[0] and $ARGV[0] eq "config" ){ print "graph_category mailinglist\n"; print "graph_total Total\n"; print 'graph_info Plugin available at http://rodolphe.quiedeville.org/hack/munin/mailman/'."\n"; - + my $num =0; while (($list,$desc) = each(%lists)) { $label=$list; diff --git a/plugins/mediawiki/mediawiki b/plugins/mediawiki/mediawiki index 09e3bbb3..d6107a42 100755 --- a/plugins/mediawiki/mediawiki +++ b/plugins/mediawiki/mediawiki @@ -4,7 +4,7 @@ # Reads number of edits,views,articles,pages,users,admins and images from a Mediawiki # http://www.mediawiki.org/wiki/MediaWiki | http://munin.projects.linpro.no/wiki # by mutante of S23 | http://s23.org/wiki | greets to hundfred -# 2007-02-12 | v0.1 first version, didnt really work yet +# 2007-02-12 | v0.1 first version, didn't really work yet # 2007-02-16 | v0.2 introduced different retrieval methods, separate graphs for the different values that can be symlinked.. # What you need to config: @@ -29,7 +29,7 @@ # require_once("/home/mutante/wiki_mysql_conf.php"); -# I include the database settings from elsewhere, so i dont have to show the password in /usr/share/.. +# I include the database settings from elsewhere, so i don't have to show the password in /usr/share/.. # I also set "[mediawiki] user mutante" in plugin-conf.d/ so that my user can read the config # alternatively set them in here like: @@ -41,7 +41,7 @@ ## B - URL reading # These methods all retrieve the Special:Statistics?action=raw URL from Mediawiki via the webserver -# This is the preferred method to get accurate stats currently, because Mediawiki doesnt use site_stats correctly atm +# This is the preferred method to get accurate stats currently, because Mediawiki does not use site_stats correctly atm # getmethod="curl" # uses curl via libcurl from PHP, should be fastest but you need the lib installed. if it works, use this. diff --git a/plugins/mediawiki/mediawiki_api b/plugins/mediawiki/mediawiki_api old mode 100644 new mode 100755 diff --git a/plugins/memcached/memcached_bytes_all b/plugins/memcached/memcached_bytes_all index 7b7bbe95..bfe5c4d5 100755 --- a/plugins/memcached/memcached_bytes_all +++ b/plugins/memcached/memcached_bytes_all @@ -1,7 +1,7 @@ #!/usr/bin/env perl # ex:ts=4 # Copyright © Nicolas BOUTHORS / Smile -# +# # Licence GPLv2 # # Based on a script distributed on munin-exchange. @@ -11,10 +11,10 @@ use warnings; use Cache::Memcached; -my %instances = (); +my %instances = (); # Will look into /etc for memcached config files and extract TCP listening port -# from the config file. +# from the config file. 
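+# For example, on Debian-style setups these are typically per-instance copies
+# of /etc/memcached.conf (e.g. /etc/memcached_sessions.conf, an illustrative
+# name), each carrying its own "-p <port>" line.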
sub fetch_instances() { my @files = glob("/etc/memcached_*.conf"); undef $/; diff --git a/plugins/memcached/memcached_ext_bytes_ b/plugins/memcached/memcached_ext_bytes_ index 8dda1871..7b1606f8 100755 --- a/plugins/memcached/memcached_ext_bytes_ +++ b/plugins/memcached/memcached_ext_bytes_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_bytes_([\d\w]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_ext_connections_ b/plugins/memcached/memcached_ext_connections_ index 326c3afc..f4b0d497 100755 --- a/plugins/memcached/memcached_ext_connections_ +++ b/plugins/memcached/memcached_ext_connections_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_connections_([\w\d]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_ext_hits_ b/plugins/memcached/memcached_ext_hits_ index 2aca77a5..8e23ef98 100755 --- a/plugins/memcached/memcached_ext_hits_ +++ b/plugins/memcached/memcached_ext_hits_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_hits_([\w\d]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_ext_items_ b/plugins/memcached/memcached_ext_items_ index 19e359f4..53904e53 100755 --- a/plugins/memcached/memcached_ext_items_ +++ b/plugins/memcached/memcached_ext_items_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_items_([\w\d]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_ext_requests_ b/plugins/memcached/memcached_ext_requests_ index 9c1d9017..0172a2c2 100755 --- a/plugins/memcached/memcached_ext_requests_ +++ b/plugins/memcached/memcached_ext_requests_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_requests_([\w\d]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_ext_traffic_ b/plugins/memcached/memcached_ext_traffic_ index a9354ca0..f77e1d98 100755 --- a/plugins/memcached/memcached_ext_traffic_ +++ b/plugins/memcached/memcached_ext_traffic_ @@ -23,7 +23,7 @@ env.label "second local server" my $label = exists $ENV{'label'} ? $ENV{'label'} : ''; unless( $label ){ - + if( $0 =~ /memcached_ext_traffic_([\w\d]+)$/ ){ $label = $1; } diff --git a/plugins/memcached/memcached_multi_ b/plugins/memcached/memcached_multi_ index f38affa0..36048acc 100755 --- a/plugins/memcached/memcached_multi_ +++ b/plugins/memcached/memcached_multi_ @@ -133,7 +133,7 @@ my $leitime = $ENV{leitime} || -1; # This gives us the ability to specify which commands we want to display on the # command graph. Allowing finer control since some environments don't leverage # every command possible in memcached. 
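+# For example, a plugin configuration stanza along these lines (illustrative
+# values; "cmds" is the environment variable read just below) restricts the
+# command graph to a subset of commands:
+#   [memcached_multi_*]
+#   env.cmds get set delete cas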
-# Options: get set delete incr decr cas touch flush +# Options: get set delete incr decr case touch flush my $commands = $ENV{cmds} || "get set delete incr decr touch"; # This hash contains the information contained in two memcache commands @@ -145,7 +145,7 @@ my %stats; my %items; # This gives us the memory size and usage per slab -# We track this so we can see what slab is being used the most and has no free chunks +# We track this so we can see what slab is being used the most and has no free chunks # so we can re-tune memcached to allocate more pages for the specified chunk size my %chnks; @@ -396,7 +396,7 @@ $graphs{slabitems} = { info => 'This graph shows you the number of items and reclaimed items per slab.', }, datasrc => [ - { name => 'number', label => 'Items', draw => 'AREA', + { name => 'number', label => 'Items', draw => 'AREA', info => 'This is the amount of items stored in this slab', min => '0' }, ], }; @@ -437,8 +437,8 @@ $graphs{slabunfetched} = { =head2 Config Check This block of code looks at the argument that is possibly supplied, - should it be config, it then checks to make sure the plugin - specified exists, assuming it does, it will run the do_config + should it be config, it then checks to make sure the plugin + specified exists, assuming it does, it will run the do_config subroutine for the plugin specified, otherwise it dies complaining about an unknown plugin. @@ -488,7 +488,7 @@ if (defined $ARGV[0] && $ARGV[0] eq 'autoconf') { which can be specified. Note we only specify the root graphs for the multigraphs, since the rest of the subgraphs will appear "behind" the root graphs. It also attempts to connect to the memcached service to - verify it is infact running. + verify it is in fact running. =cut @@ -584,7 +584,7 @@ sub fetch_output { This subroutine prints out the return values for our non-multigraph root graphs. It takes one parameter $plugin and returns when completed. - $plugin; graph we are calling up to print data values for + $plugin; graph we are calling up to print data values for Example: print_root_output($plugin); @@ -939,7 +939,7 @@ sub print_submulti_config { # Lets set our graph reference, and main graph config for easy handling my $graph = $graphs{$sgraph}; my %graphconf = %{$graph->{config}}; - # Lets tell munin which graph we are graphing, and what our main graph config info is + # Lets tell munin which graph we are graphing, and what our main graph config info is print "multigraph memcached_multi_$plugin.$sgraph\_$slabid\n"; while ( my ($key, $value) = each(%graphconf)) { if ($key eq 'title') { diff --git a/plugins/memcached/memcached_servers_ b/plugins/memcached/memcached_servers_ old mode 100644 new mode 100755 index bf5dfcd2..fb80ee91 --- a/plugins/memcached/memcached_servers_ +++ b/plugins/memcached/memcached_servers_ @@ -48,7 +48,7 @@ Link the plugin to get the desirec output, for example: memcached_multi_bytes =head1 ACKNOWLEDGEMENTS -This plugin is based on the available memcached plugins at +This plugin is based on the available memcached plugins at L =head1 AUTHORS diff --git a/plugins/memory/kmemsum b/plugins/memory/kmemsum index 74fe7106..a09a0fa8 100755 --- a/plugins/memory/kmemsum +++ b/plugins/memory/kmemsum @@ -1,7 +1,7 @@ #!/bin/sh # Kernel Memory usage stats. 
-# Author: alex@trull.org -# +# Author: alex@trull.org +# # Based on the short script at http://wiki.freebsd.org/ZFSTuningGuide (20080820) # # Parameters: @@ -16,18 +16,15 @@ if [ "$1" = "autoconf" ]; then if [ -x /sbin/sysctl ]; then - /sbin/sysctl vm.kmem_size_max > /dev/null - if [ $? = "0" ]; then - echo yes - exit 0 - else - echo no - exit 1 - fi + if /sbin/sysctl vm.kmem_size_max >/dev/null 2>&1; then + echo "yes" + else + echo "no (missing sysctl variable 'vm.kmem_size_max')" + fi else - echo no - exit 1 + echo "no (missing 'sysctl' executable)" fi + exit 0 fi TEXT=`kldstat | tr a-f A-F | awk 'BEGIN {print "ibase=16"}; NR > 1 {print $4}' | bc | awk '{a+=$1}; END {print a}'` @@ -45,7 +42,7 @@ if [ "$1" = "config" ]; then echo 'text.info kmem text' echo 'text.draw AREA' echo 'data.label data' - echo 'data.info kmem data' + echo 'data.info kmem data' echo 'data.draw STACK' echo 'total.label total' echo 'total.info kmem total' @@ -61,4 +58,4 @@ echo "data.value $DATA" echo "total.value $TOTAL" echo "max.value $MAX" - + diff --git a/plugins/memory/multimemory b/plugins/memory/multimemory index 1227e7fb..906b7b55 100755 --- a/plugins/memory/multimemory +++ b/plugins/memory/multimemory @@ -20,7 +20,7 @@ There is no default configuration. This is an example: env.os freebsd env.names apache2 mysqld php-cgi -Set env.os to freebsd if you are running this script on a machine which doesnt have +Set env.os to freebsd if you are running this script on a machine which does not have GNU sed installed (FreeBSD / OpenBSD / Solaris ...), else set it to linux. The names are used to grep with directly, after cleaning. So, this plugin diff --git a/plugins/memory/proc_memory_status b/plugins/memory/proc_memory_status index 3ad2752c..6f2f07fc 100755 --- a/plugins/memory/proc_memory_status +++ b/plugins/memory/proc_memory_status @@ -14,11 +14,10 @@ pid=`pgrep -o -x "$process"` if [ "$1" = "autoconf" ]; then if [ -r /proc/$pid/status ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -33,28 +32,28 @@ if [ "$1" = "config" ]; then echo 'VmExe.label VmExe' echo 'VmExe.draw AREA' - echo "VmExe.info The size of the executable segment" - + echo "VmExe.info The size of the executable segment" + echo 'VmLib.label VmLib' echo 'VmLib.draw STACK' echo 'VmLib.info The size of the library code' - + echo 'VmStk.label VmStk' echo 'VmStk.draw STACK' echo 'VmStk.info The stack size' - + echo 'VmLck.label VmLck' echo 'VmLck.draw STACK' echo 'VmLck.info The amount of locked memory' - + echo 'VmData.label VmData' echo 'VmData.draw STACK' echo 'VmData.info The size of the Data segment' - + echo 'VmRSS.label VmRSS' echo 'VmRSS.draw LINE2' echo 'VmRSS.info The amount of memory mapped in RAM ( instead of swapped out)' - + echo 'VmSize.label VmSize' echo 'VmSize.draw LINE2' echo 'VmSize.info The size of the virtual memory allocated to the process' diff --git a/plugins/minecraft/bukkit-jsonapi-players b/plugins/minecraft/bukkit-jsonapi-players old mode 100644 new mode 100755 index adf8710d..c414f0a8 --- a/plugins/minecraft/bukkit-jsonapi-players +++ b/plugins/minecraft/bukkit-jsonapi-players @@ -12,7 +12,7 @@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-jsonapi-ramusage b/plugins/minecraft/bukkit-jsonapi-ramusage old mode 100644 new mode 100755 index 4de54e93..c4003234 --- a/plugins/minecraft/bukkit-jsonapi-ramusage +++ b/plugins/minecraft/bukkit-jsonapi-ramusage @@ -12,7 +12,7 
@@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-jsonapi-tps b/plugins/minecraft/bukkit-jsonapi-tps old mode 100644 new mode 100755 index 597861ff..33fb0e8c --- a/plugins/minecraft/bukkit-jsonapi-tps +++ b/plugins/minecraft/bukkit-jsonapi-tps @@ -12,7 +12,7 @@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-statistician-killshostile b/plugins/minecraft/bukkit-statistician-killshostile old mode 100644 new mode 100755 index 0e4640e1..6478c841 --- a/plugins/minecraft/bukkit-statistician-killshostile +++ b/plugins/minecraft/bukkit-statistician-killshostile @@ -13,7 +13,7 @@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-statistician-killsneutral b/plugins/minecraft/bukkit-statistician-killsneutral old mode 100644 new mode 100755 index cfa85f56..e0dae2cc --- a/plugins/minecraft/bukkit-statistician-killsneutral +++ b/plugins/minecraft/bukkit-statistician-killsneutral @@ -13,7 +13,7 @@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-statistician-killspassive b/plugins/minecraft/bukkit-statistician-killspassive old mode 100644 new mode 100755 index 8344ab15..66801109 --- a/plugins/minecraft/bukkit-statistician-killspassive +++ b/plugins/minecraft/bukkit-statistician-killspassive @@ -13,7 +13,7 @@ * * Author: Jonas Friedmann (http://frd.mn) * GitHub: https://github.com/yeahwhat-mc/munin-bukkit-plugins - * + * */ /** diff --git a/plugins/minecraft/bukkit-statistician-players b/plugins/minecraft/bukkit-statistician-players old mode 100644 new mode 100755 diff --git a/plugins/minecraft/bukkit-ultrabans-shame b/plugins/minecraft/bukkit-ultrabans-shame old mode 100644 new mode 100755 diff --git a/plugins/minecraft/minecraft-users b/plugins/minecraft/minecraft-users index ecfa641b..c61d0aa8 100755 --- a/plugins/minecraft/minecraft-users +++ b/plugins/minecraft/minecraft-users @@ -1,4 +1,4 @@ -#!/usr/local/bin/ruby +#!/usr/bin/env ruby # Config: # [minecraft_users] # env.host awesomeserver.com @@ -8,29 +8,29 @@ require 'socket' if ARGV[0] == 'config' - puts "graph_title Connected players" - puts "graph_vlabel players" - puts "players.label players" - puts "graph_info Number of players connected to Minecraft" - puts "graph_category games" - exit + puts 'graph_title Connected players' + puts 'graph_vlabel players' + puts 'players.label players' + puts 'graph_info Number of players connected to Minecraft' + puts 'graph_category games' + exit end host = ENV['host'] -host = 'localhost' unless host +host ||= 'localhost' port = ENV['port'] -port = '25566' unless port +port ||= '25566' socket = TCPSocket.new(host, port) -socket.puts "QUERY" +socket.puts 'QUERY' response = socket.read response = response.split("\n") -server_port = response[0].split(" ", 2)[1].to_i -player_count = response[1].split(" ", 2)[1].to_i -max_players = response[2].split(" ", 2)[1].to_i -player_list = response[3].split(" ", 2)[1].chomp[1..-2] +server_port = response[0].split(' ', 2)[1].to_i +player_count = response[1].split(' ', 2)[1].to_i +max_players = response[2].split(' ', 2)[1].to_i +player_list = response[3].split(' ', 2)[1].chomp[1..-2] puts "players.value #{player_count}" diff --git 
a/plugins/minecraft/minecraft-users-ram_ b/plugins/minecraft/minecraft-users-ram_ old mode 100644 new mode 100755 index 9db52074..fda6e67c --- a/plugins/minecraft/minecraft-users-ram_ +++ b/plugins/minecraft/minecraft-users-ram_ @@ -1,30 +1,63 @@ #!/bin/bash -# I'm not the author, just wanted to add this plugin to github -# Author & instructions: http://wiki.natenom.name/minecraft/munin-plugin -LC_LANG=C +: <<=cut + +=head1 NAME + +minecraft-users-ram_ - monitor resource usage of a local minecraft server + +=head1 APPLICABLE SYSTEMS + +Every system with a running minecraft server. + + +=head1 USAGE + +Symlink this plugin to /etc/munin/plugins/ by adding the TCP port number used +by the local mincecraft server. Afterwards restart the munin-node. + +Some more instructions: http://wiki.natenom.name/minecraft/munin-plugin + + +=head1 AUTHOR + + 2011 Natenom + 2017 Leandro Späth + + +=head1 MAGIC MARKERS + + #%# family=manual + #%# capabilities= + +=cut + + MC_PORT=${0##*_} - + if [ "$1" = "config" ] then - printf 'graph_title Minecraft-Server (Port %s)\n' ${MC_PORT} - printf 'graph_category games' - printf 'graph_vlabel Anzahl\n' - printf 'users.label Benutzer\n' - printf 'ramusage.label Verwendeter RAM in GiB\n' + echo "graph_title Minecraft-Server (Port $MC_PORT)" + echo 'graph_category games' + echo 'graph_scale no' + echo 'graph_vlabel Players / RAM / CPU' + echo 'users.label Players' + echo 'ramusage.label RAM usage in GiB' + echo 'cpuusage.label CPU usage' exit 0 fi -PLAYERCOUNT=$(netstat -tn | grep -i ${MC_PORT} | grep ESTABLISHED | wc -l) -MC_PID=$(netstat -tlpn | grep ${MC_PORT} | sed -n -e '1p' | awk '{ print $7 }' | cut -d'/' -f1) -if [ ! -z "${MC_PID}" ] +PLAYERCOUNT=$(LC_LANG=C netstat -tn | grep ":$MC_PORT " | grep ESTABLISHED | wc -l) +MC_PID=$(netstat -tlpn | grep ":$MC_PORT " | sed -n -e '1p' | awk '{ print $7 }' | cut -d '/' -f1) +if [ -n "${MC_PID}" ] then #is running - MEMORYRSS=$(ps -p ${MC_PID} -o rss | cut -d' -' -f2) - MEMGiB=$(echo "scale=2;${MEMORYRSS}/1024/1024" | bc -l) + MEMGiB=$(ps -p "$MC_PID" -o rss | sed -n '2p' | awk '{ print $1 / 1024 / 1024 }') + CPU=$(top -bp "$MC_PID" -n 1 | sed -n '$p' | awk '{ print $10 / 100; }') else - MEMGiB=0 + MEMGiB="U" + CPU="U" fi -printf "users.value %i\n" "${PLAYERCOUNT}" -printf "ramusage.value %3.2f\n" "${MEMGiB}" +echo "users.value $PLAYERCOUNT" +echo "ramusage.value $MEMGiB" +echo "cpuusage.value $CPU" diff --git a/plugins/moblock/moblock_connections b/plugins/moblock/moblock_connections index 6f163e9c..9489e953 100755 --- a/plugins/moblock/moblock_connections +++ b/plugins/moblock/moblock_connections @@ -1,78 +1,82 @@ #!/usr/bin/env ruby -# -# Plugin to monitor the number of connections blocked by moblock. -# -# Requirements: -# -# Moblock up and running with generated log files going to /var/log/moblock -# -# Parameters supported: -# -# config -# autoconf -# -# Configurable variables -# -# logfile - Override default moblock logfile -# -# Magic markers -# -#%# family=auto + +=begin + +Plugin to monitor the number of connections blocked by moblock. 
+ +Requirements: + + Moblock up and running with generated log files going to /var/log/moblock + +Parameters supported: + + config + autoconf + +Configurable variables + + logfile - Override default moblock logfile + +Magic markers + +#%# family=auto #%# capabilities=autoconf +=end + # # Initialize vars # -$logfile = ENV['logfile'] || "/var/log/moblock.log" +$logfile = ENV['logfile'] || '/var/log/moblock.log' # # Configure generated graph # def config - puts "graph_args --base 1000 -r --lower-limit 0" - puts "graph_title Moblock" - puts "graph_vlabel Blocked Connections" - puts "graph_category fw" - puts "graph_info This graph shows the number of connections blocked by Moblock" + puts 'graph_args --base 1000 -r --lower-limit 0' + puts 'graph_title Moblock' + puts 'graph_vlabel Blocked Connections' + puts 'graph_category fw' + puts 'graph_info This graph shows the number of connections blocked by Moblock' - puts "blocked_in.label Blocked In" - puts "blocked_in.draw LINE1" - puts "blocked_in.info Number of blocked incoming connections" - puts "blocked_in.type GAUGE" + puts 'blocked_in.label Blocked In' + puts 'blocked_in.draw LINE1' + puts 'blocked_in.info Number of blocked incoming connections' + puts 'blocked_in.type GAUGE' - puts "blocked_out.label Blocked Out" - puts "blocked_out.draw LINE1" - puts "blocked_out.info Number of blocked outgoing connections" - puts "blocked_out.type GAUGE" + puts 'blocked_out.label Blocked Out' + puts 'blocked_out.draw LINE1' + puts 'blocked_out.info Number of blocked outgoing connections' + puts 'blocked_out.type GAUGE' - puts "blocked_total.label Total Blocked" - puts "blocked_total.draw LINE1" - puts "blocked_total.info Total Number of blocked connections" - puts "blocked_total.type GAUGE" + puts 'blocked_total.label Total Blocked' + puts 'blocked_total.draw LINE1' + puts 'blocked_total.info Total Number of blocked connections' + puts 'blocked_total.type GAUGE' end # # Grep moblock logs for stats # -def fetch(debug=false) - num_in = %x{cat #{$logfile} | grep --extended-regexp 'IN: ' | wc -l} - num_out = %x{cat #{$logfile} | grep --extended-regexp 'OUT: ' | wc -l} +def fetch(_debug = false) + num_in = `cat #{$logfile} | grep --extended-regexp 'IN: ' | wc -l` + num_out = `cat #{$logfile} | grep --extended-regexp 'OUT: ' | wc -l` num_total = num_in.to_i + num_out.to_i puts "blocked_in.value #{num_in}" puts "blocked_out.value #{num_out}" puts "blocked_total.value #{num_total}" end - + # # If moblock executable on path then allow autoconfiguration # def autoconf - moblock_path = %x{which moblock} + moblock_path = `which moblock` if moblock_path.index('moblock') - puts "yes" + puts 'yes' else - puts "no" + puts 'no' end end @@ -80,12 +84,12 @@ end # Handle command line args # case ARGV.first - when 'config' - config - when 'debug' - fetch true - when 'autoconf' - autoconf - else - fetch +when 'config' + config +when 'debug' + fetch true +when 'autoconf' + autoconf +else + fetch end diff --git a/plugins/mod_jk/mod_jk b/plugins/mod_jk/mod_jk index f4358c0a..dfe1dd0e 100755 --- a/plugins/mod_jk/mod_jk +++ b/plugins/mod_jk/mod_jk @@ -81,7 +81,7 @@ for i in `echo $data | sed 's/worker/\nworker/g' | grep "^worker\..*\.state=.*"` do node=`echo $i | awk -F . 
'{print $2}'` status=`echo $i | awk -F = '{print $2}'` - + value=0 if [ `echo $status | sed 's/^OK/XOK/'` = "X$status" ] then @@ -97,7 +97,7 @@ do then value=3 fi - + echo "$node.value $value" done diff --git a/plugins/mogilefs/mogilefsd_activity b/plugins/mogilefs/mogilefsd_activity index a53edce6..5a0cf760 100755 --- a/plugins/mogilefs/mogilefsd_activity +++ b/plugins/mogilefs/mogilefsd_activity @@ -24,7 +24,7 @@ # Usage: # ln -s /usr/share/munin/plugins/mogilefsd_activity \ # /etc/munin/plugins/ -# +# # Configuration variables: # # host (default: '127.0.0.1') @@ -61,7 +61,7 @@ sub autoconf { if ($ret) { print "no ($ret)\n"; - exit 1; + exit 0; } my $conn = IO::Socket::INET->new(PeerAddr => $host, @@ -72,7 +72,7 @@ sub autoconf { if (!$conn) { print "no (could not connect: $!)\n"; - exit 1; + exit 0; } my $request = "!stats\n"; @@ -135,10 +135,10 @@ if($ARGV[0] and $ARGV[0] eq "config") { print "pending_queries.draw STACK\n"; exit 0; -} - +} + print %states; - + &query_mogilefsd($mogilefsd_host, $mogilefsd_port); foreach $key (@known_states) { diff --git a/plugins/mogilefs/mogilefsd_queries b/plugins/mogilefs/mogilefsd_queries index ac433ac0..16a79dd5 100755 --- a/plugins/mogilefs/mogilefsd_queries +++ b/plugins/mogilefs/mogilefsd_queries @@ -24,7 +24,7 @@ # Usage: # ln -s /usr/share/munin/plugins/mogilefsd_activity \ # /etc/munin/plugins/ -# +# # Configuration variables: # # host (default: '127.0.0.1') @@ -61,7 +61,7 @@ sub autoconf { if ($ret) { print "no ($ret)\n"; - exit 1; + exit 0; } my $conn = IO::Socket::INET->new(PeerAddr => $host, @@ -72,7 +72,7 @@ sub autoconf { if (!$conn) { print "no (could not connect: $!)\n"; - exit 1; + exit 0; } my $request = "!stats\n"; @@ -133,10 +133,10 @@ if($ARGV[0] and $ARGV[0] eq "config") { print "queries.min 0\n"; exit 0; -} - +} + print %states; - + &query_mogilefsd($mogilefsd_host, $mogilefsd_port); foreach $key (@known_states) { diff --git a/plugins/moinmoin/moinoin_pages b/plugins/moinmoin/moinoin_pages index bc21068a..2a2ec92d 100755 --- a/plugins/moinmoin/moinoin_pages +++ b/plugins/moinmoin/moinoin_pages @@ -15,7 +15,7 @@ # # Implementation notes # -------------------- -# +# # it is quite koumbit-specific: # 1. the wikifarm config is hardcoded # 2. 
it relies on the "wikilist.py" file to contain the list of wiki -> url patterns @@ -81,25 +81,25 @@ def main(): url = sub('\(([^\|]*)(\|[^\)]*\))+', '\\1', url) # remove common regexp patterns and slap a protocol to make this a real url url = sub('[\^\$]|(\.\*)', '', url) - + mod = getattr(__import__(name), 'Config') #print "Upgradeing wiki %s (%s)" % (getattr(mod, 'sitename'), url) - + request = RequestCLI(url) pagelist = request.rootpage.getPageList(user='') - + systemPages = [page for page in pagelist if wikiutil.isSystemPage(request, page)] print(name + '.value ' + str(len(pagelist)-len(systemPages))) #totalsize = reduce(operator.add, [Page(request, name).size() for name in pagelist]) - #print('Accumulated page sizes' + _formatInReadableUnits(totalsize)) + #print('Accumulated page sizes' + _formatInReadableUnits(totalsize)) def config(): print("""graph_title Wiki size graph_vlabel Number of pages graph_args --base 1000 -l 0 graph_scale no -graph_category Wiki +graph_category wiki graph_info The number of pages excludes system pages but includes ACL-protected pages.""") for wiki in wikis: name = wiki[0] diff --git a/plugins/mongodb/mongo_btree b/plugins/mongodb/mongo_btree index bddd972a..ea27faaa 100755 --- a/plugins/mongodb/mongo_btree +++ b/plugins/mongodb/mongo_btree @@ -1,6 +1,22 @@ -#!/usr/bin/python +#!/usr/bin/env python +""" +=head1 NAME + MongoDB btree Plugin -## GENERATED FILE - DO NOT EDIT +=head1 APPLICABLE SYSTEMS + + Works until MongoDB 2.7. The "indexCounters" field was removed in 2.8 version. + +=head1 CONFIGURATION + + [mongo_btree] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Doc added by Alban Espie-Guillon +""" import urllib2 import sys @@ -50,5 +66,3 @@ if __name__ == "__main__": doConfig() else: doData() - - diff --git a/plugins/mongodb/mongo_collection_ b/plugins/mongodb/mongo_collection_ old mode 100644 new mode 100755 index 229435fe..37d939e7 --- a/plugins/mongodb/mongo_collection_ +++ b/plugins/mongodb/mongo_collection_ @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # vim: set sts=4 sw=4 encoding=utf-8 @@ -29,8 +29,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
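+# Example munin-node configuration (illustrative values; the lower-case
+# environment variable names match those read at the bottom of this script):
+#
+#   [mongo_collection_*]
+#   env.host 127.0.0.1
+#   env.port 27017
+#   env.db admin
+#   env.username munin
+#   env.password secret
+#   env.ignoredb local,test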
-#%# family=auto -#%# capabilities=suggest autoconf +# #%# family=auto +# #%# capabilities=suggest autoconf import pymongo @@ -40,7 +40,7 @@ settings_host = '127.0.0.1' settings_port = 27017 # mongodb_uri will override host and port settings_mongodb_uri = '' -settings_db = 'mydb' +settings_db = 'admin' settings_user = '' settings_password = '' settings_ignoredb = {} @@ -88,98 +88,97 @@ typeIndex['indexsize']['category'] = 'db' def getCollstats(graphtype): - if settings_mongodb_uri: - con = pymongo.MongoClient(settings_mongodb_uri) - else: - con = pymongo.MongoClient(settings_host, int(settings_port)) + if settings_mongodb_uri: + con = pymongo.MongoClient(settings_mongodb_uri) + else: + con = pymongo.MongoClient(settings_host, int(settings_port)) - if settings_user: - db = con['admin'] - db.authenticate(settings_user, settings_password) + if settings_user: + db = con[settings_db] + db.authenticate(settings_user, settings_password) - stats_tmp = {} - for dbname in con.database_names(): - if dbname in settings_ignoredb: - continue - db = con[dbname] - for coll in db.collection_names(): - if coll.startswith('system.'): - continue - stats = db.command("collstats", coll) - collname = dbname + "_" + coll.replace('.', '_') - if not stats_tmp.has_key(collname): - stats_tmp[collname] = {} - stats_tmp[collname]['value'] = 0 - stats_tmp[collname]['dbname'] = dbname - if typeIndex[graphtype]['index'] in stats: - stats_tmp[collname]['value'] += long(stats[typeIndex[graphtype]['index']]) + stats_tmp = {} + for dbname in con.list_database_names(): + if dbname in settings_ignoredb: + continue + db = con[dbname] + for coll in db.list_collection_names(): + if coll.startswith('system.'): + continue + stats = db.command("collstats", coll) + collname = dbname + "_" + coll.replace('.', '_') + if collname not in stats_tmp: + stats_tmp[collname] = {} + stats_tmp[collname]['value'] = 0 + stats_tmp[collname]['dbname'] = dbname + if typeIndex[graphtype]['index'] in stats: + stats_tmp[collname]['value'] += int(stats[typeIndex[graphtype]['index']]) + + con.close() + + for collname, item in sorted(stats_tmp.items()): + yield ("%s" % collname, item['value'], item['dbname']) - con.close() - - for collname, item in sorted(stats_tmp.items()): - yield ("%s" % collname, item['value'], item['dbname']) - - -def doData(base,graphtype): +def doData(base, graphtype): lastdb = "" for coll, stats, db in sorted(getCollstats(graphtype), key=itemgetter(2)): if lastdb != db: - print "multigraph " + base + "_" + graphtype + "_" + db + print("multigraph " + base + "_" + graphtype + "_" + db) lastdb = db - print "%s_%s.value %s" % (graphtype, coll, stats) + print("%s_%s.value %s" % (graphtype, coll, stats)) -def doConfig(base,graphtype): +def doConfig(base, graphtype): lastdb = "" - for k,v,d in sorted(getCollstats(graphtype), key=itemgetter(2)): + for k, v, d in sorted(getCollstats(graphtype), key=itemgetter(2)): if lastdb != d: - print "multigraph " + base + "_" + graphtype + "_" + d + print("multigraph " + base + "_" + graphtype + "_" + d) lastdb = d -# print "graph_total total" - print "graph_title MongoDB " + typeIndex[graphtype]['title'] + " for database " + d - print "graph_args --base " + typeIndex[graphtype]['base'] + " " + typeIndex[graphtype]['scale'] - print "graph_vlabel " + typeIndex[graphtype]['yaxis'] - print "graph_category db" - print "%s_%s.label %s" % (graphtype, k, k) - print "%s_%s.min 0" % (graphtype, k) - print "%s_%s.draw LINE1" % (graphtype, k) + print("graph_title MongoDB " + typeIndex[graphtype]['title'] + " for 
database " + d) + print("graph_args --base " + typeIndex[graphtype]['base'] + " " + typeIndex[graphtype]['scale']) + print("graph_vlabel " + typeIndex[graphtype]['yaxis']) + print("graph_category db") + print("%s_%s.label %s" % (graphtype, k, k)) + print("%s_%s.min 0" % (graphtype, k)) + print("%s_%s.draw LINE1" % (graphtype, k)) + def doSuggest(): - print "keys" - for k in typeIndex.keys(): - print k + print("keys") + for k in typeIndex.keys(): + print(k) if __name__ == "__main__": - from sys import argv,exit - from os import environ,path + from sys import argv, exit + from os import environ, path import re # Could be done by a for loop # but i think if's are faster - if 'HOST' in environ: - settings_host = environ['HOST'] - if 'PORT' in environ: - settings_port = environ['PORT'] - if 'DB' in environ: - settings_db = environ['DB'] - if 'MONGO_USER' in environ: - settings_user = environ['MONGO_USER'] - if 'PASSWORD' in environ: - settings_password = environ['PASSWORD'] - if 'IGNOREDB' in environ: - settings_ignoredb = environ['IGNOREDB'].split(',') + if 'host' in environ: + settings_host = environ['host'] + if 'port' in environ: + settings_port = environ['port'] + if 'db' in environ: + settings_db = environ['db'] + if 'username' in environ: + settings_user = environ['username'] + if 'password' in environ: + settings_password = environ['password'] + if 'ignoredb' in environ: + settings_ignoredb = environ['ignoredb'].split(',') m = re.search('^(.*)_([a-zA-Z0-9]*)$', path.basename(argv[0])) if len(argv) < 2: - doData(m.group(1),m.group(2)) + doData(m.group(1), m.group(2)) elif argv[1] == "config": - doConfig(m.group(1),m.group(2)) + doConfig(m.group(1), m.group(2)) elif argv[1] == "autoconf": - print "yes" + print("yes") elif argv[1] == "suggest": doSuggest() else: - print "invalid argument" + print("invalid argument") exit(1) diff --git a/plugins/mongodb/mongo_conn b/plugins/mongodb/mongo_conn index 62324c65..727c2cda 100755 --- a/plugins/mongodb/mongo_conn +++ b/plugins/mongodb/mongo_conn @@ -1,6 +1,22 @@ -#!/usr/bin/python +#!/usr/bin/env python +""" +=head1 NAME + MongoDB connections Plugin -## GENERATED FILE - DO NOT EDIT +=head1 APPLICABLE SYSTEMS + + Works until MongoDB 3.6. The httpinterface was later removed. + +=head1 CONFIGURATION + + [mongo_lock] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Doc added by Alban Espie-Guillon +""" import urllib2 import sys @@ -40,5 +56,3 @@ if __name__ == "__main__": doConfig() else: doData() - - diff --git a/plugins/mongodb/mongo_lag b/plugins/mongodb/mongo_lag index 80fdfbd2..d07667eb 100755 --- a/plugins/mongodb/mongo_lag +++ b/plugins/mongodb/mongo_lag @@ -1,22 +1,38 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ +=head1 NAME MongoDB Replication Lag - ~~~~~~~~~~~~~~~~~~~~~~~ Connects to a single mongo instance and retrieve replication lag for all connected members. - munin-node.conf: - [mongodb_lag] +=head1 APPLICABLE SYSTEMS + + MongoDB 3.X and 4.X with pymongo installed. + +=head1 CONFIGURATION + + munin-node.conf + defaults for host is 127.0.0.1 and port 27017 + and will work without being defined : + + [mongo_lag] env.host 127.0.0.1 env.port 27017 + env.username user + env.password P@55w0rd - :author: Stefan Andersen - :license: The Beer Ware License (Revision 42) - wrote this file. As long - as you retain this notice you can do whatever you want - with this stuff. 
If we meet some day, and you think - this stuff is worth it, you can buy me a beer in return. +=head1 AUTHOR + + Stefan Andersen + Updated by Alban Espie-Guillon + +=head1 LICENSE + The Beer Ware License (Revision 42) + wrote this file. As long + as you retain this notice you can do whatever you want + with this stuff. If we meet some day, and you think + this stuff is worth it, you can buy me a beer in return. """ import os import sys @@ -25,7 +41,13 @@ import pymongo def _get_members(): host = os.environ.get('host', '127.0.0.1') port = os.environ.get('port', 27017) - conn = pymongo.MongoClient(host,port) + username = os.environ.get('username', '') + password = os.environ.get('password', '') + conn = pymongo.MongoClient(host, int(port)) + if username: + connAuth = conn['admin'] + connAuth.authenticate(username, password) + repl_status = conn.admin.command("replSetGetStatus") members = {} @@ -43,17 +65,17 @@ def run(): for member in members: lag = (primary_optime - members[member]['optimeDate']).seconds - print "{0}.value {1}".format(member, lag) + print("{0}.value {1}".format(member, lag)) def config(): - print """graph_title MongoDB replication lag + print("""graph_title MongoDB replication lag graph_args --base 1000 graph_vlabel Replication lag (seconds) graph_category db -""" - +""") + for member in _get_members(): - print "{0}.label {0}".format(member) + print("{0}.label {0}".format(member)) if __name__ == "__main__": if len(sys.argv) > 1 and sys.argv[1] == "config": diff --git a/plugins/mongodb/mongo_lock b/plugins/mongodb/mongo_lock index ef490d7d..9bd84791 100755 --- a/plugins/mongodb/mongo_lock +++ b/plugins/mongodb/mongo_lock @@ -1,6 +1,22 @@ -#!/usr/bin/python +#!/usr/bin/env python +""" +=head1 NAME + MongoDB lock Plugin -## GENERATED FILE - DO NOT EDIT +=head1 APPLICABLE SYSTEMS + + MongoDB 2.X. The "lockTime" field was removed in later versions. + +=head1 CONFIGURATION + + [mongo_lock] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Doc added by Alban Espie-Guillon +""" import urllib2 import sys @@ -47,5 +63,3 @@ if __name__ == "__main__": doConfig() else: doData() - - diff --git a/plugins/mongodb/mongo_mem b/plugins/mongodb/mongo_mem index 8f720602..672a89c2 100755 --- a/plugins/mongodb/mongo_mem +++ b/plugins/mongodb/mongo_mem @@ -1,49 +1,83 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +""" +=head1 NAME + MongoDB mem Plugin -## GENERATED FILE - DO NOT EDIT +=head1 APPLICABLE SYSTEMS -import urllib2 + MongoDB 3.X and 4.X with pymongo installed. 
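+ The reported values correspond to the "mem" section of MongoDB's
+ serverStatus command (resident, virtual and mapped); MongoDB returns
+ them in MiB and the plugin converts them to bytes.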
+ +=head1 CONFIGURATION + + munin-node.conf + defaults for host is 127.0.0.1 and port 27017 + and will work without being defined : + + [mongo_mem] + env.host 127.0.0.1 + env.port 27017 + env.username user + env.password P@55w0rd + env.db dbname + + or + + [mongodb_mem] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Updated by Alban Espie-Guillon +""" import sys - -try: - import json -except ImportError: - import simplejson as json - +import os +import pymongo def getServerStatus(): - raw = urllib2.urlopen( "http://127.0.0.1:28017/_status" ).read() - return json.loads( raw )["serverStatus"] + if 'MONGO_DB_URI' in os.environ: + c = pymongo.MongoClient(os.environ['MONGO_DB_URI']) + + elif 'username' and 'password' in os.environ: + host = os.environ.get('host', '127.0.0.1') + port = os.environ.get('port', 27017) + username = os.environ.get('username', '') + password = os.environ.get('password', '') + c = pymongo.MongoClient(host, int(port)) + if username: + cAuth = c['admin'] + cAuth.authenticate(username, password) + + else: + c = pymongo.MongoClient() + + return c.admin.command('serverStatus', workingSet=True) def ok(s): return s == "resident" or s == "virtual" or s == "mapped" def doData(): - for k,v in getServerStatus()["mem"].iteritems(): + for k,v in getServerStatus()["mem"].items(): if ok(k): print( str(k) + ".value " + str(v * 1024 * 1024) ) -def doConfig(): - print "graph_title MongoDB memory usage" - print "graph_args --base 1024 -l 0 --vertical-label Bytes" - print "graph_category db" +def doConfig(): + print(""" +graph_title MongoDB memory usage +graph_args --base 1024 -l 0 --vertical-label Bytes +graph_category mongodb +""") for k in getServerStatus()["mem"]: if ok( k ): - print k + ".label " + k - print k + ".draw LINE1" - - - - - + print(k + ".label " + k) + print(k + ".draw LINE1") if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "config": doConfig() else: doData() - - diff --git a/plugins/mongodb/mongo_ops b/plugins/mongodb/mongo_ops index 15f6b8b9..1247fa60 100755 --- a/plugins/mongodb/mongo_ops +++ b/plugins/mongodb/mongo_ops @@ -1,45 +1,85 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +""" +=head1 NAME + MongoDB ops Plugin -## GENERATED FILE - DO NOT EDIT +=head1 APPLICABLE SYSTEMS -import urllib2 + MongoDB 3.X and 4.X with pymongo installed. 
+ +=head1 CONFIGURATION + + munin-node.conf + defaults for host is 127.0.0.1 and port 27017 + and will work without being defined : + + [mongodb_ops] + env.host 127.0.0.1 + env.port 27017 + env.username user + env.password P@55w0rd + env.db dbname + + or + + [mongodb_ops] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Updated by Alban Espie-Guillon +""" import sys - -try: - import json -except ImportError: - import simplejson as json - +import os +import pymongo def getServerStatus(): - raw = urllib2.urlopen( "http://127.0.0.1:28017/_status" ).read() - return json.loads( raw )["serverStatus"] + if 'MONGO_DB_URI' in os.environ: + c = pymongo.MongoClient(os.environ['MONGO_DB_URI']) + + elif 'username' and 'password' in os.environ: + host = os.environ.get('host', '127.0.0.1') + port = os.environ.get('port', 27017) + username = os.environ.get('username', '') + password = os.environ.get('password', '') + c = pymongo.MongoClient(host, int(port)) + if username: + cAuth = c['admin'] + cAuth.authenticate(username, password) + + else: + c = pymongo.MongoClient() + + return c.admin.command('serverStatus', workingSet=True) def doData(): ss = getServerStatus() - for k,v in ss["opcounters"].iteritems(): + for k,v in ss["opcounters"].items(): print( str(k) + ".value " + str(v) ) -def doConfig(): - print "graph_title MongoDB ops" - print "graph_args --base 1000 -l 0" - print "graph_vlabel ops / ${graph_period}" - print "graph_category db" - print "graph_total total" +def doConfig(): + print(""" +graph_title MongoDB ops +graph_args --base 1000 -l 0 +graph_vlabel ops / ${graph_period} +graph_category db +graph_total total +""") for k in getServerStatus()["opcounters"]: - print k + ".label " + k - print k + ".min 0" - print k + ".type COUNTER" - print k + ".max 500000" - print k + ".draw LINE1" + print(k + ".label " + k) + print(k + ".min 0") + print(k + ".type COUNTER") + print(k + ".max 500000") + print(k + ".draw LINE1") + if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "config": doConfig() else: doData() - - diff --git a/plugins/mongodb/mongodb_conn b/plugins/mongodb/mongodb_conn new file mode 100755 index 00000000..f0643adf --- /dev/null +++ b/plugins/mongodb/mongodb_conn @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +""" +=head1 NAME + MongoDB Connection Count Plugin + +=head1 APPLICABLE SYSTEMS + + MongoDB 3.X and 4.X with pymongo installed. + +=head1 CONFIGURATION + + munin-node.conf + defaults for host is 127.0.0.1 and port 27017 + and will work without being defined : + + [mongodb_conn] + env.host 127.0.0.1 + env.port 27017 + env.username user + env.password P@55w0rd + +=head1 AUTHOR + + Alban Espie-Guillon + based on Stefan Andersen work. + +=head1 LICENSE + The Beer Ware License (Revision 42) + wrote this file. As long + as you retain this notice you can do whatever you want + with this stuff. If we meet some day, and you think + this stuff is worth it, you can buy me a beer in return. 
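+
+=head1 USAGE
+
+ The plugin's output can be checked manually with munin-run, for example:
+
+   munin-run mongodb_conn config
+   munin-run mongodb_conn
+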
+""" +import os +import sys +import pymongo + + +def _get_connections(): + host = os.environ.get('host', '127.0.0.1') + port = os.environ.get('port', 27017) + username = os.environ.get('username', '') + password = os.environ.get('password', '') + conn = pymongo.MongoClient(host, int(port)) + if username: + connAuth = conn['admin'] + connAuth.authenticate(username, password) + + """ cli : db.serverStatus().connections """ + conn_status = conn.admin.command("serverStatus")['connections'] + return conn_status + + +def run(): + connections = _get_connections() + for c, v in connections.items(): + print(str(c) + ".value " + str(v)) + + +def config(): + print(""" +graph_title MongoDB Connections Count +graph_vlabel Connections count +graph_category db +graph_args --base 1000 -l 0 +current.label current +current.draw AREASTACK +available.label available +available.draw AREASTACK +active.label active +active.draw AREASTACK +""") + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "config": + config() + else: + run() diff --git a/plugins/mongodb/mongodb_cpu b/plugins/mongodb/mongodb_cpu new file mode 100755 index 00000000..a2fa08de --- /dev/null +++ b/plugins/mongodb/mongodb_cpu @@ -0,0 +1,77 @@ +#!/usr/bin/perl -w + +# Author : Alban Espie-Guillon +# Based on http://github.com/perusio/nginx-munin Author: António P. P. Almeida + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# Except as contained in this notice, the name(s) of the above copyright +# holders shall not be used in advertising or otherwise to promote the sale, +# use or other dealings in this Software without prior written authorization. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +=head1 NAME + +mongodb_cpu - Munin plugin to show CPU used by mongodb. + +=encoding utf8 + +=head1 APPLICABLE SYSTEMS + +Any mongodb host + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + +1.0 + +=head1 BUGS + +None known + +=head1 AUTHOR + +Based on a script by António P. P. Almeida . Modified by Alban Espie-Guillon + +=head1 LICENSE + +MIT + +=cut + +## Munin config method. 
+if (exists $ARGV[0] and $ARGV[0] eq "config") { + print "graph_title Mongodb CPU usage\n"; + print "graph_vlabel CPU\n"; + print "graph_category db\n"; + print "graph_args --base 1000 -r --lower-limit 0\n"; + print "graph_scale no\n"; + print "graph_period second\n"; + print "cpu.label CPU\n"; + print "cpu.type GAUGE\n"; + print "cpu.draw AREASTACK\n"; + print "cpu.min 0\n"; + exit 0; +} else { + my $c = `ps u -p \$(pidof mongod) | awk 'NR > 1 {print \$3}'`; + print "cpu.value $c"; +} diff --git a/plugins/mongodb/mongodb_docs b/plugins/mongodb/mongodb_docs new file mode 100755 index 00000000..43541055 --- /dev/null +++ b/plugins/mongodb/mongodb_docs @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +""" +=head1 NAME + MongoDB docs Plugin + +=head1 APPLICABLE SYSTEMS + + MongoDB 3.X and 4.X with pymongo installed. + +=head1 CONFIGURATION + + munin-node.conf + defaults for host is 127.0.0.1 and port 27017 + and will work without being defined : + + [mongodb_docs] + env.host 127.0.0.1 + env.port 27017 + env.username user + env.password P@55w0rd + env.db dbname + + or + + [mongodb_docs] + env.MONGO_DB_URI mongodb://user:password@host:port/dbname + +=head1 AUTHOR + + Original script there : https://github.com/comerford/mongo-munin + Updated by Alban Espie-Guillon +""" +import sys +import os +import pymongo + + +def getServerStatus(): + if 'MONGO_DB_URI' in os.environ: + c = pymongo.MongoClient(os.environ['MONGO_DB_URI']) + + elif 'username' and 'password' in os.environ: + host = os.environ.get('host', '127.0.0.1') + port = os.environ.get('port', 27017) + username = os.environ.get('username', '') + password = os.environ.get('password', '') + c = pymongo.MongoClient(host, int(port)) + if username: + cAuth = c['admin'] + cAuth.authenticate(username, password) + + else: + c = pymongo.MongoClient() + + return c.admin.command('serverStatus', workingSet=True) + + +def doData(): + ss = getServerStatus() + for k, v in ss["metrics"]["document"].items(): + print(str(k) + ".value " + str(v)) + + +def doConfig(): + print(""" +graph_title MongoDB documents +graph_args --base 1000 -l 0 +graph_vlabel documents +graph_category db +""") + + for k in getServerStatus()["metrics"]["document"]: + print(k + ".label " + k) + print(k + ".min 0") + print(k + ".type COUNTER") + print(k + ".max 500000") + print(k + ".draw LINE1") + + +if __name__ == "__main__": + + if len(sys.argv) > 1 and sys.argv[1] == "config": + doConfig() + else: + doData() diff --git a/plugins/mongodb/mongodb_ram b/plugins/mongodb/mongodb_ram new file mode 100755 index 00000000..ff00861e --- /dev/null +++ b/plugins/mongodb/mongodb_ram @@ -0,0 +1,75 @@ +#!/usr/bin/perl -w + +# Author : Alban Espie-Guillon +# Based on http://github.com/perusio/nginx-munin Author: António P. P. Almeida + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
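For reference, the metrics.document section that mongodb_docs iterates over normally holds the per-operation document counters (deleted, inserted, returned, updated), so a plain fetch run prints output along these lines (values are illustrative):

    deleted.value 10
    inserted.value 1234
    returned.value 56789
    updated.value 321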
+ +# Except as contained in this notice, the name(s) of the above copyright +# holders shall not be used in advertising or otherwise to promote the sale, +# use or other dealings in this Software without prior written authorization. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +=head1 NAME + +mongodb_ram - Munin plugin to show RAM used by mongodb. + +=encoding utf8 + +=head1 APPLICABLE SYSTEMS + +Any mongodb host + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + +1.0 + +=head1 BUGS + +None known + +=head1 AUTHOR + +Based on a script by António P. P. Almeida . Modified by Alban Espie-Guillon + +=head1 LICENSE + +MIT + +=cut + +## Munin config method. +if (exists $ARGV[0] and $ARGV[0] eq "config") { + print "graph_title Mongodb RAM usage\n"; + print "graph_vlabel RAM\n"; + print "graph_category db\n"; + print "graph_args --base 1024 -l 0 --upper-limit 4138442752\n"; + print "ram.label RAM\n"; + print "ram.type GAUGE\n"; + print "ram.draw AREASTACK\n"; + print "ram.min 0\n"; + exit 0; +} else { + my $m = `ps u -p \$(pidof mongod) | awk 'NR > 1 {nm += \$5} END {print nm*1024}'`; + print "ram.value $m"; +} diff --git a/plugins/monit/monit_parser b/plugins/monit/monit_parser index 8cdb36b1..aefe5615 100755 --- a/plugins/monit/monit_parser +++ b/plugins/monit/monit_parser @@ -61,8 +61,8 @@ MONIT_XML_URL = ("http://{host}:{port}/_status?format=xml" def sanitize(s): - OK_CHARS = "abcdefghijklmnopqrstuvwxyz0123456789" - return "".join([char for char in s if char in OK_CHARS]) + ok_chars = "abcdefghijklmnopqrstuvwxyz0123456789" + return "".join([char for char in s if char in ok_chars]) def get_monit_status_xml(): @@ -76,7 +76,7 @@ def get_monit_status_xml(): auth_value = "Basic %s" % auth_base64_str req.add_header("Authorization", auth_value) conn = urllib.request.urlopen(req) - except urllib.error.URLError as exc: + except urllib.error.URLError: conn = None if conn is None: raise RuntimeError("Failed to open monit status URL: {}".format(MONIT_XML_URL)) diff --git a/plugins/moodle/moodle_files b/plugins/moodle/moodle_files old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_logs b/plugins/moodle/moodle_logs old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_mod_chat b/plugins/moodle/moodle_mod_chat old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_mod_forum b/plugins/moodle/moodle_mod_forum old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_mod_quiz b/plugins/moodle/moodle_mod_quiz old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_modules_total b/plugins/moodle/moodle_modules_total old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_users_online b/plugins/moodle/moodle_users_online old mode 100644 new mode 100755 diff --git a/plugins/moodle/moodle_users_total b/plugins/moodle/moodle_users_total old mode 100644 new mode 100755 diff --git a/plugins/mpd/mpdstats_songs-day.png b/plugins/mpd/example-graphs/mpdstats_-day.png similarity index 100% rename from plugins/mpd/mpdstats_songs-day.png rename to 
plugins/mpd/example-graphs/mpdstats_-day.png diff --git a/plugins/mpd/mpdstats_ b/plugins/mpd/mpdstats_ index f9d0360a..a3b336bc 100755 --- a/plugins/mpd/mpdstats_ +++ b/plugins/mpd/mpdstats_ @@ -83,15 +83,11 @@ ACTION="$(basename "$0" | sed 's/^.*_//')" do_autoconf () { if [ -z "$NCBIN" ] ; then echo "no (missing netcat program ('nc'))" - exit 1 - fi - - if ! echo version | "$NCBIN" "$MPDHOST" "$MPDPORT" >/dev/null 2>&1; then + elif ! echo version | "$NCBIN" "$MPDHOST" "$MPDPORT" >/dev/null 2>&1; then echo "no (connection failed)" - exit 1 + else + echo "yes" fi - - echo "yes" exit 0 } diff --git a/plugins/mssql/microsoft-sql b/plugins/mssql/microsoft-sql index 457298b1..e52348e3 100755 --- a/plugins/mssql/microsoft-sql +++ b/plugins/mssql/microsoft-sql @@ -1,49 +1,52 @@ -#! /usr/bin/ruby -# -# Munin Plugin for MSSQL - transaction monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-18 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# Prerequistes: -# 1) /etc/odbc.ini and /etc/freetds.conf -# 2) rubygems -# 3) ruby-dbi -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/mssql_transaction.rb /etc/munin/plugins/mssql_transaction.rb -# -# Parameters: -# autoconf -# config (required) -# -# Config variables: -# sqluser : mssql user who has view server state privilege -# sqlpass : password for the mssql user -# dsn : datasource name as defined in /etc/odbc.ini -# instance: instance to monitor -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for MSSQL - transaction monitoring + +Author: Wilfred Chau +Date: 2011-05-18 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Prerequistes: + 1) /etc/odbc.ini and /etc/freetds.conf + 2) rubygems + 3) ruby-dbi + +Usage: + 1) copy this script to the munin install plugins directory (e.g. 
/usr/share/munin/plugins) + 2) chmod to allow executable to others + 3) create symbolic link in /etc/munin/plugins + ln -s /usr/share/munin/plugins/mssql_transaction.rb /etc/munin/plugins/mssql_transaction.rb + +Parameters: + autoconf + config (required) + +Config variables: + sqluser : mssql user who has view server state privilege + sqlpass : password for the mssql user + dsn : datasource name as defined in /etc/odbc.ini + instance: instance to monitor + #%# family=auto #%# capabilities=autoconf +=end + require 'rubygems' require 'dbi' @@ -56,10 +59,10 @@ instance = 'AdventureWorks' # Queries # # -dbh = DBI.connect("DBI:ODBC:#{dsn}",sqluser,sqlpass) +dbh = DBI.connect("DBI:ODBC:#{dsn}", sqluser, sqlpass) -instance_name_query = "SELECT distinct instance_name - FROM sys.dm_os_performance_counters +instance_name_query = "SELECT distinct instance_name + FROM sys.dm_os_performance_counters WHERE instance_name = '#{instance}' and object_name = 'SQLServer:Databases' order by instance_name" @@ -69,44 +72,45 @@ transaction_query = "select cntr_value from sys.dm_os_performance_counters and object_name = 'SQLServer:Databases' and instance_name = ?" -all_instance_names = Array.new +all_instance_names = [] sth = dbh.execute(instance_name_query) sth.fetch do |row| - all_instance_names.push(row[0].strip) + all_instance_names.push(row[0].strip) end sth.finish # # autoconf # -if ARGV[0] == "autoconf" - if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 - puts "yes" - else - puts "no" - puts "Usage: #{__FILE__} autoconf|conf" - end - exit 0 +case ARGV[0] +when 'autoconf' + if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 + puts 'yes' + else + puts 'no' + puts "Usage: #{__FILE__} autoconf|conf" + end + exit 0 # # config definition # -elsif ARGV[0] == "config" - puts "graph_args --base 1000 -r --lower-limit 0" - puts "graph_title MSSQL Transactions/s" - puts "graph_category db" - puts "graph_info This graph shows transactions/s" - puts "graph_vlabel transactions/s" - puts "graph_scale no" - puts "graph_period second" +when 'config' + puts 'graph_args --base 1000 -r --lower-limit 0' + puts 'graph_title MSSQL Transactions/s' + puts 'graph_category db' + puts 'graph_info This graph shows transactions/s' + puts 'graph_vlabel transactions/s' + puts 'graph_scale no' + puts 'graph_period second' - all_instance_names.sort.each do |s| - puts "#{s}.label #{s}" - puts "#{s}.info INSTANCE: #{s}" - puts "#{s}.type DERIVE" - puts "#{s}.draw LINE1" - end + all_instance_names.sort.each do |s| + puts "#{s}.label #{s}" + puts "#{s}.info INSTANCE: #{s}" + puts "#{s}.type DERIVE" + puts "#{s}.draw LINE1" + end - exit 0 + exit 0 end # @@ -114,11 +118,11 @@ end # sth = dbh.prepare(transaction_query) all_instance_names.sort.each do |k| - sth.execute(k) - sth.fetch do |row| - # since type is DERIVE, need to convert value to integer then to string - puts "#{k.to_s}.value #{row[0].to_i.to_s}" - end + sth.execute(k) + sth.fetch do |row| + # since type is DERIVE, need to convert value to integer then to string + puts "#{k}.value #{row[0].to_i}" + end end sth.finish dbh.disconnect diff --git a/plugins/mssql/microsoft-sql-buffer-cache-hit-ratio b/plugins/mssql/microsoft-sql-buffer-cache-hit-ratio index ef48f336..b7b83d89 100755 --- a/plugins/mssql/microsoft-sql-buffer-cache-hit-ratio +++ b/plugins/mssql/microsoft-sql-buffer-cache-hit-ratio @@ -1,48 +1,51 @@ -#! 
/usr/bin/ruby -# -# Munin Plugin for MSSQL - Buffer cache hit ratio monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-19 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# Prerequistes: -# 1) /etc/odbc.ini and /etc/freetds.conf -# 2) rubygems -# 3) ruby-dbi -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/mssql_buffercachehitratio.rb /etc/munin/plugins/mssql_buffercachehitratio.rb -# -# Parameters: -# autoconf -# config (required) -# -# Config variables: -# sqluser : mssql user who has view server state privilege -# sqlpass : password for the mssql user -# dsn : datasource name as defined in /etc/odbc.ini -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for MSSQL - Buffer cache hit ratio monitoring + +Author: Wilfred Chau +Date: 2011-05-19 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Prerequistes: + 1) /etc/odbc.ini and /etc/freetds.conf + 2) rubygems + 3) ruby-dbi + +Usage: + 1) copy this script to the munin install plugins directory (e.g. 
/usr/share/munin/plugins) + 2) chmod to allow executable to others + 3) create symbolic link in /etc/munin/plugins + ln -s /usr/share/munin/plugins/mssql_buffercachehitratio.rb /etc/munin/plugins/mssql_buffercachehitratio.rb + +Parameters: + autoconf + config (required) + +Config variables: + sqluser : mssql user who has view server state privilege + sqlpass : password for the mssql user + dsn : datasource name as defined in /etc/odbc.ini + #%# family=auto #%# capabilities=autoconf +=end + require 'rubygems' require 'dbi' @@ -54,7 +57,7 @@ dsn = 'TESTSQL' # Queries # # -dbh = DBI.connect("DBI:ODBC:#{dsn}",sqluser,sqlpass) +dbh = DBI.connect("DBI:ODBC:#{dsn}", sqluser, sqlpass) buffercachehitratio_query = "select (a.cntr_value * 1.0 / b.cntr_value) * 100.0 from sys.dm_os_performance_counters a @@ -69,37 +72,38 @@ buffercachehitratio_query = "select (a.cntr_value * 1.0 / b.cntr_value) * 100.0 # # autoconf # -if ARGV[0] == "autoconf" - if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 - puts "yes" - else - puts "no" - puts "Usage: #{__FILE__} autoconf|conf" - end - exit 0 +case ARGV[0] +when 'autoconf' + if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 + puts 'yes' + else + puts 'no' + puts "Usage: #{__FILE__} autoconf|conf" + end + exit 0 # # config definition # -elsif ARGV[0] == "config" - puts "graph_args --base 1000 -r --lower-limit 0" - puts "graph_title MSSQL Buffer Cache Hit Ratio " - puts "graph_category db" - puts "graph_info This graph shows Buffer Cache Hit Ratio" - puts "graph_vlabel %" - puts "graph_scale no" - puts "graph_period second" +when 'config' + puts 'graph_args --base 1000 -r --lower-limit 0' + puts 'graph_title MSSQL Buffer Cache Hit Ratio ' + puts 'graph_category db' + puts 'graph_info This graph shows Buffer Cache Hit Ratio' + puts 'graph_vlabel %' + puts 'graph_scale no' + puts 'graph_period second' - puts "bc_hitratio.label BufferCacheHitRatio" - puts "bc_hitratio.info BufferCacheHitRatio" - puts "bc_hitratio.type GAUGE" - puts "bc_hitratio.draw LINE1" + puts 'bc_hitratio.label BufferCacheHitRatio' + puts 'bc_hitratio.info BufferCacheHitRatio' + puts 'bc_hitratio.type GAUGE' + puts 'bc_hitratio.draw LINE1' - exit 0 + exit 0 end sth = dbh.execute(buffercachehitratio_query) sth.fetch do |row| - puts "bc_hitratio.value #{row[0].strip.to_s}" + puts "bc_hitratio.value #{row[0].strip}" end sth.finish dbh.disconnect diff --git a/plugins/mssql/microsoft-sql-data-file-sizes b/plugins/mssql/microsoft-sql-data-file-sizes index 1e068e5d..915ebc2d 100755 --- a/plugins/mssql/microsoft-sql-data-file-sizes +++ b/plugins/mssql/microsoft-sql-data-file-sizes @@ -1,49 +1,52 @@ -#! /usr/bin/ruby -# -# Munin Plugin for MSSQL - Data file size monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-19 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
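A note on the bc_hitratio value produced by the buffer-cache plugin above: SQL Server exposes ratio counters in sys.dm_os_performance_counters as a counter/base pair, which is why the query multiplies a.cntr_value / b.cntr_value by 100.0 to obtain a percentage. As a worked example, a counter value of 9800 against a base of 10000 yields (9800 * 1.0 / 10000) * 100.0 = 98.0, emitted roughly as:

    bc_hitratio.value 98.0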
-# -# -# Prerequistes: -# 1) /etc/odbc.ini and /etc/freetds.conf -# 2) rubygems -# 3) ruby-dbi -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/mssql_datafilesizes.rb /etc/munin/plugins/mssql_datafilesizes.rb -# -# Parameters: -# autoconf -# config (required) -# -# Config variables: -# sqluser : mssql user who has view server state privilege -# sqlpass : password for the mssql user -# dsn : datasource name as defined in /etc/odbc.ini -# instance: instance to monitor -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for MSSQL - Data file size monitoring + +Author: Wilfred Chau +Date: 2011-05-19 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Prerequistes: + 1) /etc/odbc.ini and /etc/freetds.conf + 2) rubygems + 3) ruby-dbi + +Usage: + 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) + 2) chmod to allow executable to others + 3) create symbolic link in /etc/munin/plugins + ln -s /usr/share/munin/plugins/mssql_datafilesizes.rb /etc/munin/plugins/mssql_datafilesizes.rb + +Parameters: + autoconf + config (required) + +Config variables: + sqluser : mssql user who has view server state privilege + sqlpass : password for the mssql user + dsn : datasource name as defined in /etc/odbc.ini + instance: instance to monitor + #%# family=auto #%# capabilities=autoconf +=end + require 'rubygems' require 'dbi' @@ -56,9 +59,9 @@ instance = 'AdventureWorks' # Queries # # -dbh = DBI.connect("DBI:ODBC:#{dsn}",sqluser,sqlpass) +dbh = DBI.connect("DBI:ODBC:#{dsn}", sqluser, sqlpass) -instance_name_query = "SELECT distinct instance_name +instance_name_query = "SELECT distinct instance_name FROM sys.dm_os_performance_counters WHERE instance_name = '#{instance}'" @@ -67,44 +70,45 @@ transaction_query = "select cntr_value/1024.0 from sys.dm_os_performance_counter and object_name = 'SQLServer:Databases' and instance_name = ?" 
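The data-file and log-file size plugins divide cntr_value by 1024.0 because the underlying counters (presumably 'Data File(s) Size (KB)' and 'Log File(s) Size (KB)', which SQL Server reports in kilobytes) need converting to the MB scale used in the graph config below. For example, a database whose data files total 5242880 KB would be reported for its instance roughly as:

    AdventureWorks.value 5120.0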
-all_instance_names = Array.new +all_instance_names = [] sth = dbh.execute(instance_name_query) sth.fetch do |row| - all_instance_names.push(row[0].strip) + all_instance_names.push(row[0].strip) end sth.finish # # autoconf # -if ARGV[0] == "autoconf" - if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 - puts "yes" - else - puts "no" - puts "Usage: #{__FILE__} autoconf|conf" - end - exit 0 +case ARGV[0] +when 'autoconf' + if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 + puts 'yes' + else + puts 'no' + puts "Usage: #{__FILE__} autoconf|conf" + end + exit 0 # # config definition # -elsif ARGV[0] == "config" - puts "graph_args --base 1024k -r --lower-limit 0" - puts "graph_title MSSQL DB File Sizes" - puts "graph_category db" - puts "graph_info This graph shows DB File Sizes (MB)" - puts "graph_vlabel MB" - puts "graph_scale no" - puts "graph_period second" +when 'config' + puts 'graph_args --base 1024k -r --lower-limit 0' + puts 'graph_title MSSQL DB File Sizes' + puts 'graph_category db' + puts 'graph_info This graph shows DB File Sizes (MB)' + puts 'graph_vlabel MB' + puts 'graph_scale no' + puts 'graph_period second' - all_instance_names.sort.each do |s| - puts "#{s}.label #{s}" - puts "#{s}.info INSTANCE: #{s}" - puts "#{s}.type GAUGE" - puts "#{s}.draw LINE1" - end + all_instance_names.sort.each do |s| + puts "#{s}.label #{s}" + puts "#{s}.info INSTANCE: #{s}" + puts "#{s}.type GAUGE" + puts "#{s}.draw LINE1" + end - exit 0 + exit 0 end # @@ -112,10 +116,10 @@ end # sth = dbh.prepare(transaction_query) all_instance_names.sort.each do |k| - sth.execute(k) - sth.fetch do |row| - puts "#{k.to_s}.value #{row[0].to_s}" - end + sth.execute(k) + sth.fetch do |row| + puts "#{k}.value #{row[0]}" + end end sth.finish dbh.disconnect diff --git a/plugins/mssql/microsoft-sql-log-file-size b/plugins/mssql/microsoft-sql-log-file-size index 45951c6a..813fac01 100755 --- a/plugins/mssql/microsoft-sql-log-file-size +++ b/plugins/mssql/microsoft-sql-log-file-size @@ -1,49 +1,52 @@ -#! /usr/bin/ruby -# -# Munin Plugin for MSSQL - log files monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-19 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# Prerequistes: -# 1) /etc/odbc.ini and /etc/freetds.conf -# 2) rubygems -# 3) ruby-dbi -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. 
/usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/mssql_logfilesizes.rb /etc/munin/plugins/mssql_logfilesizes.rb -# -# Parameters: -# autoconf -# config (required) -# -# Config variables: -# sqluser : mssql user who has view server state privilege -# sqlpass : password for the mssql user -# dsn : datasource name as defined in /etc/odbc.ini -# instance: instance to monitor -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for MSSQL - log files monitoring + +Author: Wilfred Chau +Date: 2011-05-19 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Prerequistes: + 1) /etc/odbc.ini and /etc/freetds.conf + 2) rubygems + 3) ruby-dbi + +Usage: + 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) + 2) chmod to allow executable to others + 3) create symbolic link in /etc/munin/plugins + ln -s /usr/share/munin/plugins/mssql_logfilesizes.rb /etc/munin/plugins/mssql_logfilesizes.rb + +Parameters: + autoconf + config (required) + +Config variables: + sqluser : mssql user who has view server state privilege + sqlpass : password for the mssql user + dsn : datasource name as defined in /etc/odbc.ini + instance: instance to monitor + #%# family=auto #%# capabilities=autoconf +=end + require 'rubygems' require 'dbi' @@ -56,9 +59,9 @@ instance = 'AdventureWorks' # Queries # # -dbh = DBI.connect("DBI:ODBC:#{dsn}",sqluser,sqlpass) +dbh = DBI.connect("DBI:ODBC:#{dsn}", sqluser, sqlpass) -instance_name_query = "SELECT distinct instance_name +instance_name_query = "SELECT distinct instance_name FROM sys.dm_os_performance_counters WHERE instance_name = '#{instance}'" @@ -67,52 +70,53 @@ logfilesize_query = "SELECT cntr_value/1024.0 from sys.dm_os_performance_counter AND object_name = 'SQLServer:Databases' AND instance_name = ?" 
-all_instance_names = Array.new +all_instance_names = [] sth = dbh.execute(instance_name_query) sth.fetch do |row| - all_instance_names.push(row[0].strip) + all_instance_names.push(row[0].strip) end sth.finish # # autoconf # -if ARGV[0] == "autoconf" - if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 - puts "yes" - else - puts "no" - puts "Usage: #{__FILE__} autoconf|conf" - end - exit 0 +case ARGV[0] +when 'autoconf' + if all_instance_names.length > 1 && sqluser.length > 1 && sqlpass.length > 1 + puts 'yes' + else + puts 'no' + puts "Usage: #{__FILE__} autoconf|conf" + end + exit 0 # # config definition # -elsif ARGV[0] == "config" - puts "graph_args --base 1024k -r --lower-limit 0" - puts "graph_title MSSQL DB Log File Sizes" - puts "graph_category db" - puts "graph_info This graph shows DB Log File Sizes (MB)" - puts "graph_vlabel MB" - puts "graph_scale no" - puts "graph_period second" +when 'config' + puts 'graph_args --base 1024k -r --lower-limit 0' + puts 'graph_title MSSQL DB Log File Sizes' + puts 'graph_category db' + puts 'graph_info This graph shows DB Log File Sizes (MB)' + puts 'graph_vlabel MB' + puts 'graph_scale no' + puts 'graph_period second' - all_instance_names.sort.each do |s| - puts "#{s}.label #{s}" - puts "#{s}.info INSTANCE: #{s}" - puts "#{s}.type GAUGE" - puts "#{s}.draw LINE1" - end + all_instance_names.sort.each do |s| + puts "#{s}.label #{s}" + puts "#{s}.info INSTANCE: #{s}" + puts "#{s}.type GAUGE" + puts "#{s}.draw LINE1" + end - exit 0 + exit 0 end sth = dbh.prepare(logfilesize_query) all_instance_names.sort.each do |k| - sth.execute(k) - sth.fetch do |row| - puts "#{k.to_s}.value #{row[0].to_s}" - end + sth.execute(k) + sth.fetch do |row| + puts "#{k}.value #{row[0]}" + end end sth.finish dbh.disconnect diff --git a/plugins/mumble/murmur-stats b/plugins/mumble/murmur-stats index bb87951d..03178659 100755 --- a/plugins/mumble/murmur-stats +++ b/plugins/mumble/murmur-stats @@ -41,4 +41,4 @@ print "uptime.value %.2f" % (float(meta.getUptime())/60/60/24) print "chancount.value %.1f" % (len(server.getChannels())/10) print "bancount.value %i" % (len(server.getBans())) -ice.shutdown() +ice.shutdown() diff --git a/plugins/healthcheck/healthcheck_log b/plugins/munin/healthcheck_log similarity index 98% rename from plugins/healthcheck/healthcheck_log rename to plugins/munin/healthcheck_log index 247527de..a47f5f4f 100755 --- a/plugins/healthcheck/healthcheck_log +++ b/plugins/munin/healthcheck_log @@ -78,9 +78,9 @@ MINUTE_BY_GREP_RANGE=10 if [ "$1" = "autoconf" ]; then if [ $CHECKMAX -le 1 ]; then echo no - exit 1 + else + echo yes fi - echo yes exit 0 fi @@ -89,7 +89,7 @@ if [ "$1" = "config" ]; then echo "graph_args --base 1000 -l 0 --vertical-label match_count" echo 'graph_scale no' echo 'graph_vlabel match_count' - echo 'graph_category other' + echo 'graph_category munin' echo 'graph_info This graph shows the bad event count on log' for(( I = 1; I < $CHECKMAX; ++I )) diff --git a/plugins/healthcheck/healthcheck_process b/plugins/munin/healthcheck_process similarity index 95% rename from plugins/healthcheck/healthcheck_process rename to plugins/munin/healthcheck_process index 5c674df8..685938bc 100755 --- a/plugins/healthcheck/healthcheck_process +++ b/plugins/munin/healthcheck_process @@ -15,14 +15,14 @@ #env.process_1 httpd #--------------------------------------------------- # -#chcek two process +#check two process #--------------------------------------------------- #[healthcheck_process] #env.process_1 httpd #env.process_2 
samba #--------------------------------------------------- # -#chcek three process +#check three process #--------------------------------------------------- #[healthcheck_process] #env.process_1 httpd @@ -40,9 +40,9 @@ let CHECKMAX="$CHECKMAX + 1" if [ "$1" = "autoconf" ]; then if [ $CHECKMAX -le 1 ]; then echo no - exit 1 + else + echo yes fi - echo yes exit 0 fi @@ -52,7 +52,7 @@ if [ "$1" = "config" ]; then echo "graph_args --base 1000 -l 0 --vertical-label MB" echo 'graph_scale no' echo 'graph_vlabel process memory' - echo 'graph_category other' + echo 'graph_category munin' echo 'graph_info This graph shows the Memory used by process' for(( I = 1; I < $CHECKMAX; ++I )) diff --git a/plugins/healthcheck/healthcheck_url b/plugins/munin/healthcheck_url similarity index 98% rename from plugins/healthcheck/healthcheck_url rename to plugins/munin/healthcheck_url index 77a0edea..e7faedf3 100755 --- a/plugins/healthcheck/healthcheck_url +++ b/plugins/munin/healthcheck_url @@ -17,14 +17,14 @@ #env.url_1 http://127.0.0.1/ #--------------------------------------------------- # -#check two site +#check two site #--------------------------------------------------- #[healthcheck_url] #env.url_1 http://127.0.0.1/ #env.url_2 http://www.google.com/ #--------------------------------------------------- # -#check three site +#check three site #--------------------------------------------------- #[healthcheck_url] #env.url_1 http://127.0.0.1/ @@ -90,9 +90,9 @@ CURL=/usr/bin/curl if [ "$1" = "autoconf" ]; then if [ $CHECKMAX -le 1 ]; then echo no - exit 1 + else + echo yes fi - echo yes exit 0 fi @@ -101,7 +101,7 @@ if [ "$1" = "config" ]; then echo "graph_args --base 1000 -l 0 --vertical-label second" echo 'graph_scale no' echo 'graph_vlabel second' - echo 'graph_category other' + echo 'graph_category munin' echo 'graph_info This graph shows the site speed' for(( I = 1; I < $CHECKMAX; ++I )) diff --git a/plugins/munin/munin_events b/plugins/munin/munin_events index 2a355bd9..73f36dd0 100755 --- a/plugins/munin/munin_events +++ b/plugins/munin/munin_events @@ -71,7 +71,7 @@ do_value() { FIELD="$1" EVENT_LABEL="$2" - EVENT_COUNT="$("$logtail_bin" -t "$muninupdate" 2> /dev/null | grep -c "^[0-9/: ]\{19\} \[${EVENT_LABEL}\]")" + EVENT_COUNT="$("$logtail_bin" -t "$muninupdate" 2> /dev/null | grep -c '^[0-9/: ]\{19\} \['"${EVENT_LABEL}"'\]')" if echo "$EVENT_COUNT" | grep -q "[^0-9]"; then echo "Cannot determine event count" 1>&2 exit 10 diff --git a/plugins/other/update b/plugins/munin/update similarity index 95% rename from plugins/other/update rename to plugins/munin/update index c86bce9c..e59ae59f 100755 --- a/plugins/other/update +++ b/plugins/munin/update @@ -40,11 +40,11 @@ fi if [ "$1" = "autoconf" ]; then if [ -f $UPDATE_STATSFILE ]; then - echo "yes" - exit 0 + echo "yes" + else + echo "no (logfile not readable)" fi - echo "no (logfile not readable)" - exit 1 + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/mysql/hs_read b/plugins/mysql/hs_read old mode 100644 new mode 100755 diff --git a/plugins/mysql/hs_write b/plugins/mysql/hs_write old mode 100644 new mode 100755 diff --git a/plugins/mysql/mysql-schema-size b/plugins/mysql/mysql-schema-size index d1227479..05a210f9 100755 --- a/plugins/mysql/mysql-schema-size +++ b/plugins/mysql/mysql-schema-size @@ -63,7 +63,7 @@ EOT; public function getSchemaSize() { $sql = <<getTableSize($schema,$table); $data_length = $row['data_length']; echo "$table.value $data_length\n"; @@ -74,7 +74,7 @@ EOT; public function getSchemaSize($schema) { $bind = 
array($schema); $sql = <<: created +2020-07-19: v 1.1 pcy : added config options + +=head1 USAGE + +Parameters understood: + + config (required) + autoconf (optional - used by munin-config) + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf +""" + + +from datetime import datetime, timedelta, timezone +import operator +import os +import struct +import sys + + +def weakbool(x): + return x.lower().strip() in {'true', 'yes', 'y', '1'} + + +logfile = os.getenv('logfile', '/var/log/mysql/audit.log') +toplist = int(os.getenv('toplist', '0')) +sortlist = weakbool(os.getenv('sorted', 'N')) or toplist > 0 + +STATEFILE = os.getenv('MUNIN_STATEFILE') + + +def loadstate(): + if not os.path.isfile(STATEFILE): + return None + + with open(STATEFILE, 'rb') as f: + tstamp = struct.unpack('d', f.read())[0] + return datetime.fromtimestamp(tstamp, tz=timezone.utc) + + +def savestate(state): + with open(STATEFILE, 'wb') as f: + f.write(struct.pack('d', state.timestamp())) + + +def reverse_lines(filename, BUFSIZE=4096): + with open(filename, "r") as f: + f.seek(0, 2) + p = f.tell() + remainder = "" + while True: + sz = min(BUFSIZE, p) + p -= sz + f.seek(p) + buf = f.read(sz) + remainder + if '\n' not in buf: + remainder = buf + else: + i = buf.index('\n') + for L in buf[i + 1:].split("\n")[::-1]: + yield L + remainder = buf[:i] + if p == 0: + break + yield remainder + + +def get_data(do_save=True): + occurrences = {} + begin = datetime.now(timezone.utc) + begin_local = datetime.now() + + state = loadstate() + if state is None: + # need to do something here to prevent reading indefinitely + state = begin - timedelta(minutes=5) + + for line in reverse_lines(logfile): + if ',CONNECT,' not in line: + continue + + split = line.split(',') + key = split[2] + date = datetime.strptime(split[0], '%Y%m%d %H:%M:%S') + # hack to add timezone data to the datetime + date = begin + (date - begin_local) + + if date < state: + break + + occurrences[key] = occurrences.get(key, 0) + 1 + + if do_save: + savestate(begin) + + return occurrences + + +def autoconf(): + print("no (logfile not found)" if os.path.isfile(logfile) else "yes") + + +def configure(): + print('graph_title MySQL Audit connect count') + print('graph_vlabel Connections') + print('graph_category mysql') + + occurrences = get_data(False) + occitems = occurrences.items() + occitems = sorted(occitems, key=operator.itemgetter(1 if sortlist else 0), + reverse=sortlist) + if toplist > 0: + occitems = occitems[:toplist] + + for key, value in occitems: + print('{}.label {}'.format(key.lower(), key)) + print('{}.type GAUGE'.format(key.lower())) + print('{}.draw AREASTACK'.format(key.lower())) + + +def fetch(): + occurrences = get_data() + occitems = occurrences.items() + occitems = sorted(occitems, key=operator.itemgetter(1 if sortlist else 0), + reverse=sortlist) + if toplist > 0: + occitems = occitems[:toplist] + + for key, value in occitems: + print('{}.value {}'.format(key, value)) + + +if len(sys.argv) == 2 and sys.argv[1] == "autoconf": + autoconf() +elif len(sys.argv) == 2 and sys.argv[1] == "config": + configure() +else: + fetch() diff --git a/plugins/mysql/mysql_connections b/plugins/mysql/mysql_connections index 5f779760..38991504 100755 --- a/plugins/mysql/mysql_connections +++ b/plugins/mysql/mysql_connections @@ -16,9 +16,9 @@ # along with this program; if not, see http://www.gnu.org/licenses/gpl.txt # # -# This plugin is based off of the Connection Usage +# This plugin is based off of the Connection Usage # section of the MySQL Connection 
Health Page -# +# # http://dev.mysql.com/doc/administrator/en/mysql-administrator-health-connection-health.html # # To enable, link mysql_connections to this file. E.g. @@ -38,7 +38,7 @@ # mysqlopts - Options to pass to mysql # mysqladmin - Override location of mysqladmin # warning - Override default warning limit -# critical - Override default critical limit +# critical - Override default critical limit # #%# family=auto #%# capabilities=autoconf @@ -55,7 +55,7 @@ my $critical = $ENV{critical} || "90"; # Pull in any arguments my $arg = shift(); -# Check to see how the script was called +# Check to see how the script was called if ($arg eq 'config') { print_graph_information(); exit(); @@ -66,7 +66,7 @@ if ($arg eq 'config') { } else { # Define the values that are returned to munin my ($available, $current, $upper_limit) = (0,0,0); - + # Gather the values from mysqladmin $current = poll_variables($MYSQL_VARIABLES,"Threads_connected"); $upper_limit = poll_variables($MYSQL_VARIABLES,"max_connections"); @@ -83,7 +83,7 @@ sub poll_variables { my $expression = shift; my $ret = 0; open(SERVICE, "$command |") - or die("Coult not execute '$command': $!"); + or die("Could not execute '$command': $!"); while () { my ($field, $value) = (m/(\w+).*?(\d+(?:\.\d+)?)/); next unless ($field); diff --git a/plugins/mysql/mysql_connections_per_user b/plugins/mysql/mysql_connections_per_user index db09d326..fa2d65a0 100755 --- a/plugins/mysql/mysql_connections_per_user +++ b/plugins/mysql/mysql_connections_per_user @@ -16,9 +16,9 @@ # along with this program; if not, see http://www.gnu.org/licenses/gpl.txt # # -# This plugin is based off of the Connection Usage +# This plugin is based off of the Connection Usage # section of the MySQL Connection Health Page -# +# # http://dev.mysql.com/doc/administrator/en/mysql-administrator-health-connection-health.html # # To enable, link mysql_connections to this file. E.g. 
@@ -46,7 +46,7 @@ # mysqlcli - Override location of mysql # numusers - Override maximum number of users to display # warning - Override default warning limit -# critical - Override default critical limit +# critical - Override default critical limit # #%# family=auto #%# capabilities=autoconf @@ -65,7 +65,7 @@ my $numthreads = 0; # Pull in any arguments my $arg = shift(); -# Check to see how the script was called +# Check to see how the script was called if ($arg eq 'config') { print_graph_information(); } elsif ($arg eq 'autoconf') { @@ -100,6 +100,9 @@ sub print_graph_data() { if($print_user eq "root") { $print_user = "root_"; } + if ( $print_user =~ /[^A-Za-z0-9]/ ) { + $print_user =~ s/[^A-Za-z0-9]/_/; + } print "$print_user.value $counts{$user}\n"; } my $other = $numthreads - $total; @@ -137,9 +140,13 @@ EOM if($print_user eq "root") { $print_user = "root_"; } + if ( $print_user =~ /[^A-Za-z0-9]/ ) { + $print_user =~ s/[^A-Za-z0-9]/_/; + } + print <: created + +=head1 USAGE + +Parameters understood: + + config (required) + autoconf (optional - used by munin-config) + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf +""" + + +import os +import re +import sys + + +def weakbool(x): + return x.lower().strip() in {'true', 'yes', 'y', 1} + + +LIMIT = os.getenv('db_minsize', str(50000 * 1024)) +try: + LIMIT = int(LIMIT) +except ValueError: + LIMIT = 0 + +exceptions = os.getenv('prefix_with_underscores', '').split(' ') +if exceptions == ['']: + exceptions = [] + +mysqldir = os.getenv('mysql_db_dir', '/var/lib/mysql') + + +def name_from_path(path): + filename = os.path.basename(path) + for name in exceptions: + if filename.startswith(name): + return name + name = filename.split('_')[0] + + def decode_byte(m): + return bytes.fromhex(m.group(1)).decode('utf-8') + + # Decode MySQL's encoding of non-ascii characters in the table names + return re.sub('@00([0-9a-z]{2})', decode_byte, name) + + +def calc_dir_size(directory): + total = 0 + + for filename in os.listdir(directory): + filedir = os.path.join(directory, filename) + + if os.path.islink(filedir): + continue + if os.path.isfile(filedir): + total += os.path.getsize(filedir) + + return total + + +def size_per_subdir(parentdir): + for subdir in os.listdir(parentdir): + dirpath = os.path.join(parentdir, subdir) + + if os.path.islink(dirpath): + continue + if os.path.isdir(dirpath): + yield calc_dir_size(dirpath), name_from_path(os.path.split(dirpath)[1]) + + +def sizes_by_name(limit=None): + sizes = {} + for size, name in size_per_subdir(mysqldir): + sizes[name] = sizes.get(name, 0) + size + for name, total_size in sizes.items(): + if limit <= 0 or limit <= total_size: + yield name, total_size + + +def fetch(): + for name, total_size in sorted(sizes_by_name(limit=LIMIT), key=lambda x: x[0]): + print('{}.value {}'.format(name, total_size)) + + +def main(): + if len(sys.argv) == 1: + fetch() + elif sys.argv[1] == 'config': + print('graph_title MySQL disk usage by prefix') + print('graph_vlabel bytes') + print('graph_category db') + + names = sorted(name for name, _ in sizes_by_name(limit=LIMIT)) + for name in names: + print('{0}.label {0}'.format(name)) + print('{}.type GAUGE'.format(name)) + print('{}.draw AREASTACK'.format(name)) + elif sys.argv[1] == 'autoconf': + print('yes' if os.path.isdir(mysqldir) + else "no (can't find MySQL's data directory)") + else: + fetch() + + +if __name__ == "__main__": + main() diff --git a/plugins/mysql/mysql_qcache b/plugins/mysql/mysql_qcache index 605adf9b..269a3e3b 100755 --- 
a/plugins/mysql/mysql_qcache +++ b/plugins/mysql/mysql_qcache @@ -59,7 +59,7 @@ if ($arg eq 'config') { open(SERVICE, "$COMMAND |") - or die("Coult not execute '$COMMAND': $!"); + or die("Could not execute '$COMMAND': $!"); while () { my ($k, $v) = (m/(\w+).*?(\d+(?:\.\d+)?)/); @@ -93,7 +93,7 @@ graph_info Plugin available at ) { my ($k, $v) = (m/(\w+).*?(\d+(?:\.\d+)?)/); @@ -75,7 +75,7 @@ while () { close(SERVICE); open(SERVICE, "$COMMANDSIZE |") - or die("Coult not execute '$COMMANDSIZE': $!"); + or die("Could not execute '$COMMANDSIZE': $!"); while () { my ($k, $v) = (m/(\w+).*?(\d+(?:\.\d+)?)/); diff --git a/plugins/mysql/mysql_report b/plugins/mysql/mysql_report index e8bf94fa..a680df38 100755 --- a/plugins/mysql/mysql_report +++ b/plugins/mysql/mysql_report @@ -60,13 +60,12 @@ default_args="--base 1000 -l 0" default_scale="no" if [ "${1}" = "autoconf" ]; then - result=0 if [ -z "${mysqlbin}" ]; then - echo "no" + echo "no (missing mysql executable)" else echo "yes" fi - exit $result + exit 0 fi if [ -z "${names}" ]; then @@ -156,4 +155,4 @@ for name in ${names}; do fi done - + diff --git a/plugins/mysql/mysql_size_ b/plugins/mysql/mysql_size_ index 1b96107f..9d980255 100755 --- a/plugins/mysql/mysql_size_ +++ b/plugins/mysql/mysql_size_ @@ -47,8 +47,8 @@ my $db = $2; my $MYSQLADMIN = $ENV{mysqladmin} || "mysql"; -my %WANTED = ( "Index" => "index", - "Datas" => "datas", +my %WANTED = ( "Index" => "index", + "Data" => "data", ); my $arg = shift(); @@ -65,14 +65,14 @@ if ($arg eq 'config') { exit; } -my $datas = 0; +my $data_count = 0; my $indexes = 0; my (@infos,$info,$i_data,$i_index); my $COMMAND = "$MYSQLADMIN $ENV{mysqlopts} $db -e 'show table status;' | grep 'Data'"; open(SERVICE, "$COMMAND |") - or die("Coult not execute '$COMMAND': $!"); + or die("Could not execute '$COMMAND': $!"); while () { (@infos) = split; @@ -95,16 +95,16 @@ foreach $info (@infos) { $COMMAND = "$MYSQLADMIN $ENV{mysqlopts} $db -e 'show table status;' | cut -f $i_data,$i_index | grep -v leng"; open(SERVICE, "$COMMAND |") - or die("Coult not execute '$COMMAND': $!"); + or die("Could not execute '$COMMAND': $!"); while () { (m/(\d+).*?(\d+(?:\.\d+)?)/); - $datas += $1; + $data_count += $1; $indexes += $2; } close(SERVICE); -print("datas.value $datas\n"); +print("data.value $data_count\n"); print("index.value $indexes\n"); diff --git a/plugins/mysql/mysql_size_all b/plugins/mysql/mysql_size_all index c6f617b9..6d5da300 100755 --- a/plugins/mysql/mysql_size_all +++ b/plugins/mysql/mysql_size_all @@ -48,10 +48,6 @@ use strict; my $COMMAND; my $MYSQLADMIN = $ENV{mysqladmin} || "mysql"; -my %WANTED = ( "Index" => "index", - "Datas" => "datas", - ); - my $arg = shift(); if ($arg eq 'config') { @@ -69,14 +65,12 @@ if ($arg eq 'config') { sub getDBList; foreach my $db (getDBList()) { - my $datas = 0; - my $indexes = 0; my (@infos,$info,$i_data,$i_index); $COMMAND = "$MYSQLADMIN $ENV{mysqlopts} $db -e 'show table status;' | head -n 1"; open(SERVICE, "$COMMAND |") - or die("Coult not execute '$COMMAND': $!"); + or die("Could not execute '$COMMAND': $!"); while () { (@infos) = split; @@ -100,20 +94,16 @@ foreach my $db (getDBList()) { $COMMAND = "$MYSQLADMIN $ENV{mysqlopts} $db -e 'show table status;' | cut -f $i_data,$i_index | grep -v leng"; open(SERVICE, "$COMMAND |") - or die("Coult not execute '$COMMAND': $!"); + or die("Could not execute '$COMMAND': $!"); while () { (m/(\d+).*?(\d+(?:\.\d+)?)/); - $datas += $1; - $indexes += $2; + $total_size += $1 + $2; } close(SERVICE); - $total_size = $datas+$indexes; } 
print("$db.value $total_size\n"); -# print("datas.value $datas\n"); -# print("index.value $indexes\n"); } @@ -170,7 +160,7 @@ sub test_service { sub getDBList { my @dbs; foreach my $f (glob("/var/lib/mysql/*")) { - if (-d $f) { + if (-d $f) { $f =~ s/\@002d/-/g; $f =~ s!.*/!!; @dbs[$#dbs+1]=$f }; diff --git a/plugins/mysql/mysql_size_ondisk b/plugins/mysql/mysql_size_ondisk index 1045b04a..432a7114 100755 --- a/plugins/mysql/mysql_size_ondisk +++ b/plugins/mysql/mysql_size_ondisk @@ -42,11 +42,10 @@ then if du -sb $DIR &> /dev/null then echo "yes" - exit 0 else echo "no" - exit 1 fi + exit 0 elif [ "${1:-}" = "config" ] then echo "graph_title MySQL on-disk database size" diff --git a/plugins/mysql/mysql_slave_threads b/plugins/mysql/mysql_slave_threads index f09f97af..47a2091d 100755 --- a/plugins/mysql/mysql_slave_threads +++ b/plugins/mysql/mysql_slave_threads @@ -24,6 +24,19 @@ use strict; my $MYSQL = $ENV{mysql} || "mysql"; my $MYSQLOPTS = $ENV{mysqlopts} || ""; +my $status = `$MYSQL $MYSQLOPTS -e 'SHOW SLAVE STATUS\\G'`; + +my $sqlerror = ""; +my $ioerror = ""; + +if ($status =~ /Last_SQL_Error: (.+)/) { + $sqlerror = $1; +} +if ($status =~ /Last_IO_Error: (.+)/) { + $ioerror = $1; +} + + if(defined $ARGV[0] && $ARGV[0] eq 'config') { print </dev/null; then echo "no (femon not installed)" - exit 1 - fi - - if [ -e /dev/dvb/adapter$Tuner/ ]; then + elif [ -e "/dev/dvb/adapter$Tuner/" ]; then echo yes - exit 0 else echo "no (dvb tuner $Tuner not found)" - exit 1 - fi;; + fi + exit 0 + ;; config) echo "graph_title DVB signal strength" diff --git a/plugins/mythtv/mythtv_programs b/plugins/mythtv/mythtv_programs index 826549a1..4abcf559 100755 --- a/plugins/mythtv/mythtv_programs +++ b/plugins/mythtv/mythtv_programs @@ -2,8 +2,8 @@ # # Munin plugin for MythTV # This plugin can graph:- EPG programs per channel -# -# NOTE: This plugin needs to run as root so add the following to your munin-node config file +# +# NOTE: This plugin needs to run as root so add the following to your munin-node config file # [mythtv_status*] # user=root # @@ -42,14 +42,12 @@ my $Channel=""; if ($ARGV[0] and $ARGV[0] eq "autoconf" ) { if ( $SQLDBName ne "" ) { print "yes\n"; - exit 0; } else { - print "no\n"; - print "cannot find MythTV configuration file my.txt\n"; - exit 1; + print "no (cannot find MythTV configuration file my.txt)\n"; } - } - + exit 0; + } + #Config Options ##Configuration for encoder, no config data needs to read from anywhere if ($ARGV[0] and $ARGV[0] eq "config"){ @@ -71,16 +69,16 @@ my $Channel=""; print "Channel" . $Channel . "EPG.label EPG days for channel $gata\n"; $Ptr=0; } - } + } exit 0; } #Actually dump data to Munin - @result=SQLQuery("SELECT o.chanid, (UNIX_TIMESTAMP(MAX(c.endtime)) - - UNIX_TIMESTAMP(NOW()))/86400 - FROM channel o, program c + @result=SQLQuery("SELECT o.chanid, (UNIX_TIMESTAMP(MAX(c.endtime)) + - UNIX_TIMESTAMP(NOW()))/86400 + FROM channel o, program c WHERE o.chanid = c.chanid - AND o.visible = '1' + AND o.visible = '1' GROUP BY o.chanid"); my $Ptr=0; foreach $gata (@result) { @@ -89,10 +87,10 @@ my $Channel=""; $Ptr=1; } else { if ( $gata > 12 ) { - print "Channel" . $Channel . "EPG.value 12.0\n"; - } else { + print "Channel" . $Channel . "EPG.value 12.0\n"; + } else { print "Channel" . $Channel . "EPG.value $gata\n"; - } + } $Ptr=0; } } @@ -104,7 +102,7 @@ exit 0; sub PrepSQLRead { my $hostname = `hostname`; chomp($hostname); - + # Read the mysql.txt file in use by MythTV. 
Could be in a couple places, so try the usual suspects my $found = 0; my @mysql = ('/usr/local/share/mythtv/mysql.txt', @@ -153,7 +151,7 @@ sub SQLQuery { my ($QUERY) = @_; my @data; my $ref; - my $dbh = DBI->connect("DBI:mysql:$SQLDBName:$SQLServer", $SQLUser, $SQLPassword) + my $dbh = DBI->connect("DBI:mysql:$SQLDBName:$SQLServer", $SQLUser, $SQLPassword) or die "Couldn't connect to database: " . DBI->errstr; my $table_data = $dbh->prepare($QUERY) or die "Couldn't prepare statement: " . $dbh->errstr; $table_data->execute or die "Couldn't execute statement: " . $table_data->errstr; @@ -165,5 +163,5 @@ sub SQLQuery { return @data; } else { return 0; - } + } } diff --git a/plugins/mythtv/mythtv_status_ b/plugins/mythtv/mythtv_status_ index dbace488..dfaf31e0 100755 --- a/plugins/mythtv/mythtv_status_ +++ b/plugins/mythtv/mythtv_status_ @@ -2,19 +2,19 @@ # # Munin plugin for MythTV # This plugin can graph:- Encoder Status, Days Remaining in Schedule, Job schedule, Recording Schedule, Recorded Programes, Recorded hours -# +# # Create a symbolic link to mythtv_status_{GraphType} # Where {GraphType} can be encoder, epg, job, schedule, recorded # for example mythtv_status_encoder # -# NOTE: This plugin needs to run as root so add the following to your munin-node config file +# NOTE: This plugin needs to run as root so add the following to your munin-node config file # [mythtv_status*] # user=root # The http/xml status page must be enabled in the mythtv backend. # # $Log$ # Revision 0.1 2008/03/27 idobson -# Code for all options except recorded implemented +# Code for all options except recorded implemented # # Revision 0.2 2008/03/28 idobson # Tidied up the code abit/removed dead functions @@ -35,7 +35,7 @@ # Revision 0.7 2008/04/3 idobson # Now using SQL to read the number of days in the EPG # Changed recordings symlink to schedule, makes more sense -# +# # Revision 0.8 2008/04/6 idobson # Tidied up the SQL code abit, moved it into a function. # @@ -43,7 +43,7 @@ # Added a check that we got the XML data before trying to parse it. # # Revision 1.0 2008/04/15 idobson -# Fixed undef returned from SQL query, it now returns 0, +# Fixed undef returned from SQL query, it now returns 0, # added error handler to SQL sub. It just dies with an error text. 
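To make the mythtv_programs values above concrete: the query returns, per visible channel, the number of days between NOW() and the end of the newest guide entry, and the plugin caps the result at 12.0, so guide data ending six days and twelve hours from now would be reported as something like Channel1001EPG.value 6.5 (the field name embeds the channel id, and anything beyond twelve days is clamped to 12.0 by the check shown above).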
# # Revision 1.1 2008/05/03 idobson @@ -94,7 +94,7 @@ my $RecHoursLiveTV=0; my $result=""; my $gata=""; my $VideoInput=1; - $GraphOption=`basename $0 | sed 's/^mythtv_status_//g' | tr '_' '-'` ; + $GraphOption=`basename $0 | sed 's/^mythtv_status_//g' | tr '_' '-'` ; chomp $GraphOption; PrepSQLRead(); @@ -103,7 +103,7 @@ PrepSQLRead(); if ($ARGV[0] and $ARGV[0] eq "autoconf" ) { print "yes\n"; exit 0; - } + } #Config Options ##Configuration for encoder, no config data needs to read from anywhere @@ -135,7 +135,7 @@ PrepSQLRead(); print "FreeEncoders.colour 00FF00\n"; print "HungEncoders.colour 000000\n"; print "HungEncoders.draw LINE1\n"; - print "HungEncoders.label Encoders that have hung (no DB update)\n"; + print "HungEncoders.label Encoders that have hung (no DB update)\n"; print "HungEncoders.warning 0:0\n"; } @@ -147,7 +147,7 @@ PrepSQLRead(); print "graph_title MythTV EPG days/Programs\n"; print "graph_category tv\n"; print "graph_vlabel Days\/Programs\n"; - + @result=SQLQuery("SELECT DISTINCT `sourceid` FROM `cardinput`"); $VideoInput = 1; foreach $gata (@result) { @@ -259,7 +259,7 @@ PrepSQLRead(); @result=SQLQuery("SELECT count(*) FROM `capturecard` "); my $FreeRecorders=$result[0]; my $ActiveTuners=0; - @result=SQLQuery("SELECT videosource.name, count( inuseprograms.recusage ) + @result=SQLQuery("SELECT videosource.name, count( inuseprograms.recusage ) FROM inuseprograms, channel, videosource WHERE inuseprograms.recusage = 'recorder' AND inuseprograms.chanid = channel.chanid @@ -283,8 +283,8 @@ PrepSQLRead(); } print "FreeEncoders.value $FreeRecorders\n"; @result=SQLQuery("SELECT count(inuseprograms.recusage) - from inuseprograms - where inuseprograms.recusage = 'recorder' + from inuseprograms + where inuseprograms.recusage = 'recorder' and (UNIX_TIMESTAMP( NOW( )) - UNIX_TIMESTAMP(inuseprograms.lastupdatetime)) / 60 > 20"); print "HungEncoders.value $result[0]\n"; } @@ -296,7 +296,7 @@ PrepSQLRead(); foreach $gata (@result) { print "EPGDays$VideoInput.value $gata\n"; $VideoInput++; - } + } #Get number of programs in EPG per video source $VideoInput = 1; @@ -324,7 +324,7 @@ PrepSQLRead(); @result=SQLQuery("SELECT COUNT(*) FROM `record`"); print "RecordingSchedules.value $result[0]\n"; -#Connect to mythtv using the MythTV object +#Connect to mythtv using the MythTV object my $Repeats=0; my $Recordings=0; my $Conflicts=0; @@ -374,7 +374,7 @@ exit 0; sub PrepSQLRead { my $hostname = `hostname`; chomp($hostname); - + # Read the mysql.txt file in use by MythTV. Could be in a couple places, so try the usual suspects my $found = 0; my @mysql = ('/usr/local/share/mythtv/mysql.txt', @@ -423,7 +423,7 @@ sub SQLQuery { my ($QUERY) = @_; my @data; my $ref; - my $dbh = DBI->connect_cached("DBI:mysql:$SQLDBName:$SQLServer", $SQLUser, $SQLPassword) + my $dbh = DBI->connect_cached("DBI:mysql:$SQLDBName:$SQLServer", $SQLUser, $SQLPassword) or die "Couldn't connect to database: " . DBI->errstr; my $table_data = $dbh->prepare($QUERY) or die "Couldn't prepare statement: " . $dbh->errstr; $table_data->execute or die "Couldn't execute statement: " . 
$table_data->errstr; @@ -435,7 +435,7 @@ sub SQLQuery { return @data; } else { return 0; - } + } } # Returns true if the show is scheduled to record diff --git a/plugins/nagios/nagios_multi_ b/plugins/nagios/nagios_multi_ index f9031dd9..cb45d85a 100755 --- a/plugins/nagios/nagios_multi_ +++ b/plugins/nagios/nagios_multi_ @@ -14,7 +14,7 @@ A Plugin to monitor Nagios Servers and their Performance (Multigraph) =head2 MUNIN ENVIRONMENT CONFIGURATION EXPLANATION binary = location of your nagiostats binary including binary - passive = tell the plugin to graph passive results + passive = tell the plugin to graph passive results =head1 NODE CONFIGURATION @@ -60,7 +60,7 @@ Matt West < https://github.com/mhwest13/Nagios-Munin-Plugin > GPLv2 =head1 MAGIC MARKERS - + #%# family=auto #%# capabilities=autoconf suggest @@ -76,7 +76,7 @@ if (basename($0) !~ /^nagios_multi_/) { exit 1; } -# tell munin about our multigraph capabilties +# tell munin about our multigraph capabilities need_multigraph(); # import binary information or use default setting @@ -122,12 +122,12 @@ my $passive = $ENV{passive} || 'off'; my %graphs; -# main graph for service checks +# main graph for service checks $graphs{services} = { config => { args => '--lower-limit 0', vlabel => 'Service Problems', - category => 'nagios', + category => 'munin', title => 'Service Problems', info => 'Current Service Problems by Alert Status', }, @@ -144,7 +144,7 @@ $graphs{svcchkdetail} = { config => { args => '--lower-limit 0', vlabel => 'Total Number of Service Checks', - category => 'details', + category => 'munin', title => 'Detailed Service Info', info => 'Detailed Service Check Information', }, @@ -164,7 +164,7 @@ $graphs{svcchksc} = { config => { args => '--lower-limit 0 --upper-limit 100', vlabel => '%', - category => 'statechange', + category => 'munin', title => 'Service State Change', info => 'Total Percent of State Change between checks', }, @@ -180,7 +180,7 @@ $graphs{svcchklat} = { config => { args => '--lower-limit 0', vlabel => 'time (ms)', - category => 'latency', + category => 'munin', title => 'Service Check Latency Times', info => 'Service Check Latency Times', }, @@ -196,7 +196,7 @@ $graphs{svcchkext} = { config => { args => '--lower-limit 0', vlabel => 'time (ms)', - category => 'execution', + category => 'munin', title => 'Service Check Execution Times', info => 'Service Check Execution Times', }, @@ -207,12 +207,12 @@ $graphs{svcchkext} = { { name => 'AVGACTSVCEXT', label => 'Average Execution', min => '0', type => 'GAUGE', info => 'avg active service check execution time (ms).', draw => 'LINE2' }, ], }; -# main graph for host problems +# main graph for host problems $graphs{hosts} = { config => { args => '--lower-limit 0', vlabel => 'Host Problems', - category => 'nagios', + category => 'munin', title => 'Host Problems', info => 'Current Host Problems by Alert Status', }, @@ -228,7 +228,7 @@ $graphs{hostchkdetail} = { config => { args => '--lower-limit 0', vlabel => 'Total Number of Host Checks', - category => 'details', + category => 'munin', title => 'Detailed Host Info', info => 'Detailed Host Check Information', }, @@ -247,7 +247,7 @@ $graphs{hostchksc} = { config => { args => '--lower-limit 0 --upper-limit 100', vlabel => '%', - category => 'statechange', + category => 'munin', title => 'Host State Change', info => 'Total Percent of State Change between checks', }, @@ -263,7 +263,7 @@ $graphs{hostchklat} = { config => { args => '--lower-limit 0', vlabel => 'time (ms)', - category => 'latency', + category => 'munin', 
title => 'Host Check Latency Times', info => 'Host Check Latency Times', }, @@ -279,7 +279,7 @@ $graphs{hostchkext} = { config => { args => '--lower-limit 0', vlabel => 'time (ms)', - category => 'execution', + category => 'munin', title => 'Host Check Execution Times', info => 'Host Check Execution Times', }, @@ -290,12 +290,12 @@ $graphs{hostchkext} = { { name => 'AVGACTHSTEXT', label => 'Average Execution', min => '0', type => 'GAUGE', info => 'avg active host check execution time (ms).', draw => 'LINE2' }, ], }; -# main graph for host / service check counts +# main graph for host / service check counts $graphs{checks} = { config => { args => '--lower-limit 0', vlabel => 'Total Number of Checks', - category => 'nagios', + category => 'munin', title => 'Totals', info => 'Total Number of Service and Host Checks', }, @@ -310,7 +310,7 @@ $graphs{hostchkactcount} = { config => { args => '--lower-limit 0', vlabel => 'Number Host Checks', - category => 'active', + category => 'munin', title => 'Host Checks', info => 'Total Number of Active Host Checks', order => 'NUMHSTACTCHK60M NUMHSTACTCHK15M NUMHSTACTCHK5M NUMHSTACTCHK1M', @@ -328,7 +328,7 @@ $graphs{hostchkpsvcount} = { config => { args => '--lower-limit 0', vlabel => 'Number Host Checks', - category => 'passive', + category => 'munin', title => 'Host Checks', info => 'Total Number of Passive Host Checks', order => 'NUMHSTPSVCHK60M NUMHSTPSVCHK15M NUMHSTPSVCHK5M NUMHSTPSVCHK1M', @@ -346,7 +346,7 @@ $graphs{svcchkactcount} = { config => { args => '--lower-limit 0', vlabel => 'Number of Service Checks', - category => 'active', + category => 'munin', title => 'Service Checks', info => 'Total Number of Active Service Checks', order => 'NUMSVCACTCHK60M NUMSVCACTCHK15M NUMSVCACTCHK5M NUMSVCACTCHK1M', @@ -364,7 +364,7 @@ $graphs{svcchkpsvcount} = { config => { args => '--lower-limit 0', vlabel => 'Number of Service Checks', - category => 'passive', + category => 'munin', title => 'Service Checks', info => 'Total Number of Passive Service Checks', order => 'NUMSVCPSVCHK60M NUMSVCPSVCHK15M NUMSVCPSVCHK5M NUMSVCPSVCHK1M', @@ -382,7 +382,7 @@ $graphs{extcmdcount} = { config => { args => '--lower-limit 0', vlabel => 'Number of Ext Command Slots', - category => 'externalcmds', + category => 'munin', title => 'External Commands', info => 'External Command Buffer Slot Information', }, @@ -404,8 +404,8 @@ $graphs{extcmdcount} = { =head2 Config Check This block of code looks at the argument that is possibly supplied, - should it be config, it then checks to make sure the plugin - specified exists, assuming it does, it will run the do_config + should it be config, it then checks to make sure the plugin + specified exists, assuming it does, it will run the do_config subroutine for the plugin specified, otherwise it dies complaining about an unknown plugin. @@ -618,7 +618,7 @@ sub do_config { =cut sub print_sub_config { - # Lets get our plugin and subgraph, after that print for Munin to process it. + # Lets get our plugin and subgraph, after that print for Munin to process it. 
my ($plugin,$subgraph) = (@_); my $graph = $graphs{$subgraph}; print "multigraph nagios_$plugin.$subgraph\n"; diff --git a/plugins/nagios/nagiosstatus b/plugins/nagios/nagiosstatus index 57200814..8222e380 100755 --- a/plugins/nagios/nagiosstatus +++ b/plugins/nagios/nagiosstatus @@ -1,6 +1,6 @@ #!/usr/bin/perl -w # -# Copyright (C) 2008 Rune Nordbe Skillingstad +# Copyright (C) 2008 Rune Nordbøe Skillingstad # # Plugin to monitor status in Nagios # @@ -14,7 +14,7 @@ # Config variables: # # statuslog - Which logfile to use -# Might be /var/log/nagios2/nagios.log if +# Might be /var/log/nagios2/nagios.log if # /var/log/nagios/status.log is missing # # This program is free software; you can redistribute it and/or @@ -46,11 +46,10 @@ if ($ARGV[0]) { if ($ARGV[0] eq 'autoconf') { if (-r $NAGIOSSTAT) { print "yes"; - exit 0; } else { print "no (Nagios status file not found)"; - exit 1; } + exit 0; } elsif ($ARGV[0] eq "config") { print "graph_args --base 1000 -l 0 --vertical-label Checks\n"; print "graph_title Nagios status\n"; @@ -109,7 +108,7 @@ while() { if(/^\s+\}\s*$/) { $type = ""; } - + if($type) { push(@{$values{$type}}, $_); } diff --git a/plugins/netapp/snmp__netapp_cifs b/plugins/netapp/snmp__netapp_cifs index 75e7365a..23acc2c7 100755 --- a/plugins/netapp/snmp__netapp_cifs +++ b/plugins/netapp/snmp__netapp_cifs @@ -4,21 +4,21 @@ """ # The SNMP traps for the NetApp filer can be found in -# /net/netappfiler/vol0/etc/mib/traps.dat if the filer is +# /net/netappfiler/vol0/etc/mib/traps.dat if the filer is # NFS automounted mounted on server. # Example: the SNMP id for cpuBusyTimePerCent is -# snmp.1.3.6.1.4.1.789.1.2.1.3.0 -# and retrival of this value is done by +# snmp.1.3.6.1.4.1.789.1.2.1.3.0 +# and retrieval of this value is done by # snmpget -v 1 -c public netappfiler 1.3.6.1.4.1.789.1.2.1.3.0 # -# Requires snmpget and assumes public community. +# Requires snmpget and assumes public community. import commands import sys # Provided a servername and a snmpid it returns the value stripped of bogus information. def snmpget(iservername,isnmpid): - runcmd = 'snmpget -v 1 -c public ' + iservername + ' ' + isnmpid + runcmd = 'snmpget -v 1 -c public ' + iservername + ' ' + isnmpid output = commands.getoutput(runcmd) return output.split()[3] @@ -34,14 +34,14 @@ if len(sys.argv) == 2 and sys.argv[1] == "config": print 'graph_args --base 1000 -l 0' print 'graph_vlabel number' print 'graph_category fs' - print 'graph_info This graph shows CIFS usage on '+servername + print 'graph_info This graph shows CIFS usage on '+servername print 'cifsConnectedUsers.label ConnectedUsers' print 'cifsConnectedUsers.info The current number of CIFS users on the filer' - + print 'cifsNSessions.label NumberOfSessions' print 'cifsNSessions.info The current number of active CIFS session on the filer' - + print 'cifsNOpenFiles.label NumberOfOpenfiles' print 'cifsNOpenFiles.info The number of open CIFS files and directories on the filer' sys.exit(0) diff --git a/plugins/netapp/snmp__netapp_cifs2 b/plugins/netapp/snmp__netapp_cifs2 index 40814761..140fa48f 100755 --- a/plugins/netapp/snmp__netapp_cifs2 +++ b/plugins/netapp/snmp__netapp_cifs2 @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. 
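The comment header of snmp__netapp_cifs above describes the retrieval path by hand: ask the filer for one OID with snmpget and keep only the value column. As a quick way to verify a filer before enabling these plugins, the same check can be done from a shell. This is only a sketch under the plugin's own assumptions (SNMPv1, "public" community, filer reachable as "netappfiler"); the helper name netapp_snmp_value is ours, not part of the repository.

#!/bin/sh
# Sketch only: mirrors the manual retrieval described in the
# snmp__netapp_cifs comments. Requires net-snmp's snmpget.
FILER=${1:-netappfiler}
OID=${2:-1.3.6.1.4.1.789.1.2.1.3.0}   # cpuBusyTimePerCent, per the plugin comments

netapp_snmp_value() {
    # snmpget prints "OID = TYPE: value"; the 4th field is the value,
    # which matches the plugin's output.split()[3].
    snmpget -v 1 -c public "$1" "$2" | awk '{print $4}'
}

echo "cpu_busy.value $(netapp_snmp_value "$FILER" "$OID")"

If the command prints a bare number, the community-based access that the CONFIGURATION sections above rely on is working for that host.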
diff --git a/plugins/netapp/snmp__netapp_cifscalls b/plugins/netapp/snmp__netapp_cifscalls index 5ff673d3..a1d862b3 100755 --- a/plugins/netapp/snmp__netapp_cifscalls +++ b/plugins/netapp/snmp__netapp_cifscalls @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_cpu b/plugins/netapp/snmp__netapp_cpu index ea56b09b..95350e55 100755 --- a/plugins/netapp/snmp__netapp_cpu +++ b/plugins/netapp/snmp__netapp_cpu @@ -4,14 +4,14 @@ """ # The SNMP traps for the NetApp filer can be found in -# /net/netappfiler/vol0/etc/mib/traps.dat if the filer is +# /net/netappfiler/vol0/etc/mib/traps.dat if the filer is # NFS automounted mounted on server. # Example: the SNMP id for cpuBusyTimePerCent is -# snmp.1.3.6.1.4.1.789.1.2.1.3.0 -# and retrival of this value is done by +# snmp.1.3.6.1.4.1.789.1.2.1.3.0 +# and retrieval of this value is done by # snmpget -v 1 -c public netappfiler 1.3.6.1.4.1.789.1.2.1.3.0 # -# Requires snmpget and assumes public community. +# Requires snmpget and assumes public community. import commands import sys @@ -24,7 +24,7 @@ def snmpget(iservername,isnmpid): snmpid = "1.3.6.1.4.1.789.1.2.1.3.0" warning = 80 -critical = 95 +critical = 95 servername = sys.argv[0].split('_')[1] if len(sys.argv) == 2 and sys.argv[1] == "config": diff --git a/plugins/netapp/snmp__netapp_cpu2 b/plugins/netapp/snmp__netapp_cpu2 index 651a2077..b1a577af 100755 --- a/plugins/netapp/snmp__netapp_cpu2 +++ b/plugins/netapp/snmp__netapp_cpu2 @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_diskbusy b/plugins/netapp/snmp__netapp_diskbusy index 35a4b262..da44a4ec 100755 --- a/plugins/netapp/snmp__netapp_diskbusy +++ b/plugins/netapp/snmp__netapp_diskbusy @@ -64,7 +64,7 @@ sub do_collect sub do_config_root { # graph_category san # To show plugin in Gallery also in this category - + my ($host) = @_; print "multigraph diskbusy\n"; diff --git a/plugins/netapp/snmp__netapp_diskusage2_ b/plugins/netapp/snmp__netapp_diskusage2_ index 4f76d021..fe0e5312 100755 --- a/plugins/netapp/snmp__netapp_diskusage2_ +++ b/plugins/netapp/snmp__netapp_diskusage2_ @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_diskutil b/plugins/netapp/snmp__netapp_diskutil index f6a19c57..4efada8b 100755 --- a/plugins/netapp/snmp__netapp_diskutil +++ b/plugins/netapp/snmp__netapp_diskutil @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. 
+Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_ndmp b/plugins/netapp/snmp__netapp_ndmp index e37b28b4..24275f07 100755 --- a/plugins/netapp/snmp__netapp_ndmp +++ b/plugins/netapp/snmp__netapp_ndmp @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_net b/plugins/netapp/snmp__netapp_net index b2ab602b..09935dcc 100755 --- a/plugins/netapp/snmp__netapp_net +++ b/plugins/netapp/snmp__netapp_net @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_nfs3calls b/plugins/netapp/snmp__netapp_nfs3calls index 44acdb21..c85130d9 100755 --- a/plugins/netapp/snmp__netapp_nfs3calls +++ b/plugins/netapp/snmp__netapp_nfs3calls @@ -12,11 +12,11 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is -insecure because it doesn't encrypt the community string. +insecure because it doesn't encrypt the community string. -The following parameters will help you get this plugin working : +The following parameters will help you get this plugin working: [snmp_*] env.community MyCommunity diff --git a/plugins/netapp/snmp__netapp_ops b/plugins/netapp/snmp__netapp_ops index a443749d..3ffd15d1 100755 --- a/plugins/netapp/snmp__netapp_ops +++ b/plugins/netapp/snmp__netapp_ops @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netapp/snmp__netapp_sis b/plugins/netapp/snmp__netapp_sis index 640c2f39..07dbff10 100755 --- a/plugins/netapp/snmp__netapp_sis +++ b/plugins/netapp/snmp__netapp_sis @@ -13,7 +13,7 @@ with SNMP agent daemon activated. See na_snmp(8) for details. =head1 CONFIGURATION -Unfortunately, SNMPv3 is not fully supported on all NetApp equipments. +Unfortunately, SNMPv3 is not fully supported on all NetApp equipment. For this reason, this plugin will use SNMPv2 by default, which is insecure because it doesn't encrypt the community string. diff --git a/plugins/netscaler/snmp__netscaler_connections b/plugins/netscaler/snmp__netscaler_connections index d8048643..8b6952a1 100755 --- a/plugins/netscaler/snmp__netscaler_connections +++ b/plugins/netscaler/snmp__netscaler_connections @@ -23,9 +23,9 @@ # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # -# ---------------------------------------------------- # +# ---------------------------------------------------- # =head1 NAME @@ -33,7 +33,7 @@ netscaler_conn - Munin plugin to monitor netscaler connections =head1 CONFIGURATION -Make a symlink from netscaler_conn_ to /etc/munin/plugins/netscaler_conn_. +Make a symlink from netscaler_conn_ to /etc/munin/plugins/netscaler_conn_. You can omit , then you need the env variable B. To configure the plugin, use ENV variables. @@ -148,7 +148,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") { print "ssl.draw LINE2\n"; print "ssl.info Currently active SSL sessions.\n"; - for my $field qw(client server ssl) { + for my $field (qw(client server ssl)) { print_thresholds($field); } exit 0; @@ -173,7 +173,7 @@ $return_str .= "ssl.value $counter1\n"; &close_session($session); -print "$return_str"; +print "$return_str"; exit 0; # --------------------------- functions ------------------------- # @@ -203,7 +203,7 @@ sub close_session { sub get_buildversion { my ($session) = @_; my $build_version; - + my $result = $session->get_request( -varbindlist => [$oid_build_version] ); @@ -228,7 +228,7 @@ sub get_oid_values { if (!defined($result)) { return "na"; } - else { + else { $return_value = $result->{$oid_string}; return $return_value; } diff --git a/plugins/netscaler/snmp__netscaler_cpu b/plugins/netscaler/snmp__netscaler_cpu index b9ff57f1..a13f447d 100755 --- a/plugins/netscaler/snmp__netscaler_cpu +++ b/plugins/netscaler/snmp__netscaler_cpu @@ -23,9 +23,9 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # -# ---------------------------------------------------- # +# ---------------------------------------------------- # =head1 NAME @@ -33,7 +33,7 @@ netscaler_cpu - Munin plugin to monitor CPU usage =head1 CONFIGURATION -Make a symlink from netscaler_cpu_ to /etc/munin/plugins/netscaler_cpu_. +Make a symlink from netscaler_cpu_ to /etc/munin/plugins/netscaler_cpu_. You can omit , then you need the env variable B. To configure the plugin, use ENV variables. 
@@ -96,7 +96,7 @@ $o_community = undef; $o_port = 161; my $return_str = ""; -my @cpu_name = (); +my @cpu_name = (); # ---------------------------- snmp ---------------------------- # @@ -153,17 +153,17 @@ if ($ARGV[0] and $ARGV[0] eq "config") { print "avg.label avg\n"; print "avg.draw AREA\n"; print "avg.info Average load.\n"; - foreach my $v (@cpu_name){ - print $v.".label ".$v."\n"; - print $v.".draw LINE2\n"; - print $v.".info CPU usage of ".$v."\n"; - print_thresholds($v); + foreach my $v (@cpu_name){ + print $v.".label ".$v."\n"; + print $v.".draw LINE2\n"; + print $v.".info CPU usage of ".$v."\n"; + print_thresholds($v); } - for my $field qw(avg) { + for my $field (qw(avg)) { print_thresholds($field); } - &close_session($session); + &close_session($session); exit 0; } @@ -176,7 +176,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") { &close_session($session); -print "$return_str"; +print "$return_str"; exit 0; # --------------------------- functions ------------------------- # @@ -206,7 +206,7 @@ sub close_session { sub get_buildversion { my ($session) = @_; my $build_version; - + my $result = $session->get_request( -varbindlist => [$oid_build_version] ); @@ -268,7 +268,7 @@ sub get_cpus { return 0; } else { - @cpu_name = (); + @cpu_name = (); foreach my $n ($session->var_bind_names()) { push @cpu_name, $session->var_bind_list()->{$n}; } diff --git a/plugins/network/bandwidth-OVH-Network b/plugins/network/bandwidth-OVH-Network index e3bed44b..880c91e6 100755 --- a/plugins/network/bandwidth-OVH-Network +++ b/plugins/network/bandwidth-OVH-Network @@ -12,6 +12,7 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel MB/s' echo 'graph_scale no' + echo 'graph_category network' echo "download.label download" exit 0 fi diff --git a/plugins/network/bandwidth_ b/plugins/network/bandwidth_ index 09b3410c..031709b8 100755 --- a/plugins/network/bandwidth_ +++ b/plugins/network/bandwidth_ @@ -18,7 +18,7 @@ bandwidth_ to this file. E.g. Most likely usage is to monitor an interface connected to your ISP. -The suggest option will try and determine if you have any interfaces with a +The suggest option will try and determine if you have any interfaces with a public IP and if so it will suggest monitoring those interfaces. If all IP addresses are private the setup will have to be done manually. Suggest does not handle IPv6 addresses. @@ -192,12 +192,12 @@ EOM } sub read_traffic { - open( my $rx, "<", "/sys/class/net/$interface/statistics/rx_bytes" ) + open( my $rx, "<", "/sys/class/net/$interface/statistics/rx_bytes" ) || die "Unable to read: $!"; $counter_input = <$rx>; chomp $counter_input; close($rx); - open(my $tx , "<", "/sys/class/net/$interface/statistics/tx_bytes" ) + open(my $tx , "<", "/sys/class/net/$interface/statistics/tx_bytes" ) || die "Unable to read: $!"; $counter_output = <$tx>; chomp $counter_output; @@ -231,7 +231,7 @@ sub update_stats { else { if ( $perf_ref->{last}->{counter_input} > $counter_input ) { $input = - $counter_input + $counter_input + $rollover - $perf_ref->{last}->{counter_input}; } @@ -240,7 +240,7 @@ sub update_stats { } if ( $perf_ref->{last}->{counter_output} > $counter_output ) { $output = - $counter_output + $counter_output + $rollover - $perf_ref->{last}->{counter_output}; } diff --git a/plugins/network/bgpd b/plugins/network/bgpd index 8624a221..dc0c7515 100755 --- a/plugins/network/bgpd +++ b/plugins/network/bgpd @@ -1,7 +1,7 @@ #!/bin/sh # bgpd.sh -# +# # # Created by spleen. 
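The bandwidth_ hunks above only trim trailing whitespace, but they show where that plugin takes its numbers from: the per-interface byte counters under /sys/class/net/<interface>/statistics. A minimal read of those counters from a shell looks like the sketch below; it is illustrative only (the interface and field names are chosen here, not mandated by the plugin), and it deliberately skips the state keeping and counter-rollover handling that bandwidth_ itself performs in update_stats.

#!/bin/sh
# Sketch only: read the same Linux sysfs counters bandwidth_ uses.
IF=${1:-eth0}
STATS="/sys/class/net/$IF/statistics"
[ -r "$STATS/rx_bytes" ] || { echo "no counters for $IF" >&2; exit 1; }
rx=$(cat "$STATS/rx_bytes")
tx=$(cat "$STATS/tx_bytes")
# Raw, ever-growing byte counters; munin turns them into rates when the
# fields are declared as DERIVE/COUNTER in the plugin's config output.
echo "in.value $rx"
echo "out.value $tx"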
diff --git a/plugins/network/brc_rssi b/plugins/network/brc_rssi index 348f8263..ae5a886c 100755 --- a/plugins/network/brc_rssi +++ b/plugins/network/brc_rssi @@ -6,7 +6,7 @@ # NOTE: NEEDS NON FREE UTILITY "wl" # Configuration: # [brc_rssi] -# env.WIFISIDE eth0 # Set the WiFi side interface. Used to filter arp entries. +# env.WIFISIDE eth0 # Set the WiFi side interface. Used to filter arp entries. # # On a openwrt box defaults to "nvram get lan_ifname" otherwise # # no default. # @@ -46,7 +46,7 @@ graph_vlabel dB(?) graph_category network graph_info This plugin shows the RSSI (Received Signal Strength Indication) as reported by the Access Point (AP) driver. The plugin is specific to broadcom wireless chipsets such as used on WRT hardware. We're not quite sure about the units the RSSI is measured in as this is not documented. Both dB and dBm are apparently candidates. Higher is better. EOF - # Atempt to find default. "Set default" and "assign default" syntax is + # Attempt to find default. "Set default" and "assign default" syntax is # not available in busybox (which is used in openwrt firmware) it seems. # So work around it with case. : ${WIFISIDE:=$(nvram get lan_ifname 2>/dev/null)} @@ -58,7 +58,7 @@ EOF m=$(echo $M | tr -d ':') LABEL=$M NAME='' - + IP=$(echo "$ETHERS" | awk '/^'$M'/ { print $2; }') case $IP in '') :;; @@ -77,11 +77,11 @@ EOF do_autoconf () { case $WLERR in - 0) echo yes; exit 0;; - 127) echo "no ($AL)"; exit 1;; - *) echo "no (wl error: $AL)"; exit 1;; - *) echo "no (no wl executable, or error)"; exit 1;; + 0) echo yes;; + 127) echo "no ($AL)";; + *) echo "no (wl error: $AL)";; esac + exit 0 } case $1 in diff --git a/plugins/network/ddclient b/plugins/network/ddclient old mode 100644 new mode 100755 index cebf6db7..bb3ca78a --- a/plugins/network/ddclient +++ b/plugins/network/ddclient @@ -5,7 +5,7 @@ # The base frame is copied from the proftp plugin # ########################################################################################## -# Folgende Eintraege in der Datei /etc/munin/plugin-conf.d/munin-node nicht vergessen ! # +# Folgende Eintraege in der Datei /etc/munin/plugin-conf.d/munin-node nicht vergessen ! 
# # Don't forget to add following lines to the file /etc/munin/plugin-conf.d/munin-node # # [quota] # # user root # @@ -47,10 +47,10 @@ fi # Nur fuer Testzwecke kann das - For testing only you can -# Zeitfenster vergroessert werden resize the reference periode +# Zeitfenster vergroessert werden resize the reference period if [ "${1//[^[:digit:]]}" != "" ]; then factor=${1//[^[:digit:]]} - else + else factor=1 fi @@ -60,7 +60,7 @@ fi # Zeitfenster in Sekunden - time slot in seconds let Timeslot=60*30*$factor -# Referenzzeitpunkt berechnen - calculate the reference periode +# Referenzzeitpunkt berechnen - calculate the reference period let Ref_Timestamp=Timestamp-Timeslot # Zeitstempel der letzten Aktualisierung - timestampe of the last update @@ -75,12 +75,12 @@ fi echo "ip_change.value 0" fi -# Nur zum Testen - for testing ony +# Nur zum Testen - for testing only if [ "$factor" -gt 1 ]; then - echo "======================== Nur fuer Testzwecke ======================" + echo "======================== Nur fuer Testzwecke ======================" echo "Timestamp :" $Timestamp $(date -d "1970-01-01 UTC + $Timestamp seconds") echo "Ref_Timestamp:" $Ref_Timestamp $(date -d "1970-01-01 UTC + $Ref_Timestamp seconds") - echo "Zeitfenster :" $((Timeslot/60)) Minuten + echo "Zeitfenster :" $((Timeslot/60)) Minuten echo "Last_update :" $Last_update $(date -d "1970-01-01 UTC + $Last_update seconds") - echo "======================== for testing only ======================" + echo "======================== for testing only ======================" fi diff --git a/plugins/network/denyhosts b/plugins/network/denyhosts index d1456c4b..8eda6f17 100755 --- a/plugins/network/denyhosts +++ b/plugins/network/denyhosts @@ -1,6 +1,6 @@ #!/bin/bash # -# Plugin to monitor the number of hosts denied by DenyHosts +# Plugin to monitor the number of hosts denied by DenyHosts # # $Log$ # Revision 1.0 2009/06/05 16:00:00 tjansson @@ -19,11 +19,10 @@ LOG=/etc/hosts.deny if [ "$1" = "autoconf" ]; then if [ -r "$LOG" ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/network/dns/dnsresponse_ b/plugins/network/dns/dnsresponse_ index 684d92bd..b3821f57 100755 --- a/plugins/network/dns/dnsresponse_ +++ b/plugins/network/dns/dnsresponse_ @@ -93,11 +93,10 @@ if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { if (! -s $resconf) { $ret .= "$resconf not found. "; } if ($ret) { print "no ($ret)\n"; - exit 1; } else { print "yes\n"; - exit 0; } + exit 0; } if ( defined $ARGV[0] and $ARGV[0] eq "suggest" ) { diff --git a/plugins/network/ethtool_ b/plugins/network/ethtool_ index be9ea456..8d477e73 100755 --- a/plugins/network/ethtool_ +++ b/plugins/network/ethtool_ @@ -42,11 +42,10 @@ if [ "$1" = "autoconf" ]; then $ETHTOOL 2>/dev/null >/dev/null if [ $? 
-ne 0 ]; then echo no - exit 1 else echo yes - exit 0 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/network/netstat_s_/netstat_s_tcp_connections_connections-day-linux.png b/plugins/network/example-graphs/netstat_s_-day.png similarity index 100% rename from plugins/network/netstat_s_/netstat_s_tcp_connections_connections-day-linux.png rename to plugins/network/example-graphs/netstat_s_-day.png diff --git a/plugins/network/netstat_s_/netstat_s_tcp_connections_connections-month-freebsd.png b/plugins/network/example-graphs/netstat_s_-month.png similarity index 100% rename from plugins/network/netstat_s_/netstat_s_tcp_connections_connections-month-freebsd.png rename to plugins/network/example-graphs/netstat_s_-month.png diff --git a/plugins/network/example-graphs/olsrd-link_quality_day.png b/plugins/network/example-graphs/olsrd-link_quality_day.png new file mode 100644 index 00000000..512c1cf6 Binary files /dev/null and b/plugins/network/example-graphs/olsrd-link_quality_day.png differ diff --git a/plugins/network/example-graphs/olsrd-neighbour_link_count_day.png b/plugins/network/example-graphs/olsrd-neighbour_link_count_day.png new file mode 100644 index 00000000..223a784a Binary files /dev/null and b/plugins/network/example-graphs/olsrd-neighbour_link_count_day.png differ diff --git a/plugins/network/fms b/plugins/network/fms index 29eeb219..c1502ebd 100755 --- a/plugins/network/fms +++ b/plugins/network/fms @@ -68,7 +68,7 @@ # For autoconfiguration you'll also need the following Perl module: # # Proc::ProcessTable - Perl extension to access the unix process table -# http://search.cpan.org/perldoc?Proc::ProcessTable +# http://search.cpan.org/perldoc?Proc::ProcessTable # # On a Debian/Ubuntu system you can install this with the following command # (if APT is configured properly): @@ -213,7 +213,7 @@ if ( !( ( $usenetstat eq "yes" and defined($fmshost) and defined($fmsport) ) or # 2. Look for the FMS config file in ${FMS_DIR}/conf/fms.ini. # 3. Fetch host, port, admin username and password values from the # config file. - + # check that plugin is running with root privileges if ( $> == 0 ) { my $ProcTable = new Proc::ProcessTable; @@ -272,11 +272,10 @@ if ( !( ( $usenetstat eq "yes" and defined($fmshost) and defined($fmsport) ) or if ( defined($ARGV[0]) and $ARGV[0] eq "autoconf" ) { if ( ( $usenetstat eq "yes" and defined($fmshost) and defined($fmsport) ) or ( $usenetstat eq "no" and defined($adminhost) and defined($adminport) and defined($adminusername) and defined($adminpassword) ) ) { print("yes\n"); - exit 0; } else { print("no\n"); - exit 1; } + exit 0; } if ( $usenetstat eq "yes" and defined($fmshost) and defined($fmsport) ) { diff --git a/plugins/network/fms_apps b/plugins/network/fms_apps index 06beed5d..d0bd5cd9 100755 --- a/plugins/network/fms_apps +++ b/plugins/network/fms_apps @@ -11,7 +11,7 @@ # # libwww-perl (LWP) Perl library # Proc::ProcessTable Perl module -# +# # Tested with: # Debian Etch # Macromedia Flash Media Server 2.0.3 r68 @@ -32,7 +32,7 @@ # http://search.cpan.org/perldoc?LWP # # Proc::ProcessTable - Perl extension to access the unix process table -# http://search.cpan.org/perldoc?Proc::ProcessTable +# http://search.cpan.org/perldoc?Proc::ProcessTable # # On a Debian/Ubuntu system: # apt-get install libwww-perl libproc-process-perl @@ -150,7 +150,7 @@ if ( !( defined($host) and defined($port) and defined($username) and defined($pa # 2. Look for the FMS config file in ${FMS_DIR}/conf/fms.ini. # 3. 
Fetch host, port, admin username and password values from the # config file. - + # check that plugin is running with root privileges if ( $> == 0 ) { my $ProcTable = new Proc::ProcessTable; @@ -200,11 +200,10 @@ if ( defined($ARGV[0]) ) { if ( $ARGV[0] eq "autoconf" ) { if ( defined($host) and defined($port) and defined($username) and defined($password) ) { print("yes\n"); - exit 0; } else { print("no\n"); - exit 1; } + exit 0; } elsif ( $ARGV[0] eq "config" ) { print <<'END_GRAPH_CONFIG'; graph_title Flash Media Server application connections diff --git a/plugins/network/fms_apps_rate b/plugins/network/fms_apps_rate index 9a9b1573..41470e05 100755 --- a/plugins/network/fms_apps_rate +++ b/plugins/network/fms_apps_rate @@ -12,7 +12,7 @@ # # libwww-perl (LWP) Perl library # Proc::ProcessTable Perl module -# +# # Tested with: # Debian Etch # Macromedia Flash Media Server 2.0.3 r68 @@ -33,7 +33,7 @@ # http://search.cpan.org/perldoc?LWP # # Proc::ProcessTable - Perl extension to access the unix process table -# http://search.cpan.org/perldoc?Proc::ProcessTable +# http://search.cpan.org/perldoc?Proc::ProcessTable # # On a Debian/Ubuntu system: # apt-get install libwww-perl libproc-process-perl @@ -151,7 +151,7 @@ if ( !( defined($host) and defined($port) and defined($username) and defined($pa # 2. Look for the FMS config file in ${FMS_DIR}/conf/fms.ini. # 3. Fetch host, port, admin username and password values from the # config file. - + # check that plugin is running with root privileges if ( $> == 0 ) { my $ProcTable = new Proc::ProcessTable; @@ -201,11 +201,10 @@ if ( defined($ARGV[0]) ) { if ( $ARGV[0] eq "autoconf" ) { if ( defined($host) and defined($port) and defined($username) and defined($password) ) { print("yes\n"); - exit 0; } else { print("no\n"); - exit 1; } + exit 0; } elsif ( $ARGV[0] eq "config" ) { print <<'END_GRAPH_CONFIG'; graph_title Flash Media Server application connection rates diff --git a/plugins/network/fwbuilder_ b/plugins/network/fwbuilder_ index c7742d75..106f8d42 100755 --- a/plugins/network/fwbuilder_ +++ b/plugins/network/fwbuilder_ @@ -10,10 +10,10 @@ # Additionally, you need Accountingrules in fwbuilder # fwbuilder creates Chains in INPUT-, OUTPUT- and FORWARD-Chain # with Rules that "RETURN" -# You will have to specify rule options with name "ACCOUNTING" for the +# You will have to specify rule options with name "ACCOUNTING" for the # rules to use, otherwise no rules will be found. -# try "fwbuilder_ suggest" to see if all is ok. -# +# try "fwbuilder_ suggest" to see if all is ok. +# # # Furthermore, this plugin needs to be run as root for iptables to work # @@ -61,23 +61,21 @@ IP=${IP/-/\/} if [ "$1" = "autoconf" ]; then if [ -r /proc/net/dev ]; then - iptables -L INPUT -v -n -x >/dev/null 2>/dev/null + iptables -L INPUT -v -n -x -w >/dev/null 2>/dev/null if [ $? 
-gt 0 ]; then echo "no (could not run iptables as user `whoami`)" - exit 1 else echo yes - exit 0 fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then # find Chains for Accounting - iptables -L -n |grep ^ACCOUNTING |awk '{printf "%s\n%s\n",$4,$5}'| sort -u |sed 's#\/#-#' + iptables -L -n -w | grep ^ACCOUNTING |awk '{printf "%s\n%s\n",$4,$5}'| sort -u |sed 's#\/#-#' exit 0 fi @@ -95,10 +93,10 @@ if [ "$1" = "config" ]; then echo 'in.label received' echo 'in.type DERIVE' echo 'in.min 0' - echo 'in.cdef in,8,*' + echo 'in.cdef in,8,*' exit 0 fi; -echo "in.value $(( $(iptables -L -n -v -x |grep "ACCOUNTING" |awk '{printf "%s %s\n",$2,$9}' |grep $IP |awk '{printf "%s + ",$1}') 0 ))" -echo "out.value $(( $(iptables -L -n -v -x |grep "ACCOUNTING" |awk '{printf "%s %s\n",$2,$8}' |grep $IP |awk '{printf "%s + ",$1}') 0 ))" +echo "in.value $(( $(iptables -L -n -v -x -w | grep "ACCOUNTING" | awk '{printf "%s %s\n",$2,$9}' | grep $IP | awk '{printf "%s + ",$1}') 0 ))" +echo "out.value $(( $(iptables -L -n -v -x -w |grep "ACCOUNTING" | awk '{printf "%s %s\n",$2,$8}' | grep $IP | awk '{printf "%s + ",$1}') 0 ))" diff --git a/plugins/network/hfsc b/plugins/network/hfsc index 14314069..fb1171d3 100755 --- a/plugins/network/hfsc +++ b/plugins/network/hfsc @@ -11,7 +11,7 @@ # http://www.elessar.one.pl/article_munin.php # ### -# Written by Rafal Rajs +# Written by Rafal Rajs # Date: 2007/06/19 # Email: elessar1@poczta.wp.pl # WWW: http://www.elessar.one.pl diff --git a/plugins/network/hfsc_sep b/plugins/network/hfsc_sep index 114458d6..355e1c13 100755 --- a/plugins/network/hfsc_sep +++ b/plugins/network/hfsc_sep @@ -11,7 +11,7 @@ # http://www.elessar.one.pl/article_munin.php # ### -# Written by Rafal Rajs +# Written by Rafal Rajs # Date: 2007/06/19 # Email: elessar1@poczta.wp.pl # WWW: http://www.elessar.one.pl diff --git a/plugins/network/hostsdeny b/plugins/network/hostsdeny index d5196556..175b18ae 100755 --- a/plugins/network/hostsdeny +++ b/plugins/network/hostsdeny @@ -1,11 +1,14 @@ -#!/bin/bash +#!/bin/sh -eu # # Plugin to monitor the number of hosts in /etc/hosts.deny -# that are deined access to sshd -# +# that are denied access to sshd +# # Based on denyhosts plugin by tjansson (2009) # # Copyright (C) 2009 Kåre Hartvig Jensen (kaare.hartvig.jensen@gmail.com) +# Copyright (C) 2019 Olivier Mehani +# +# SPDX-License-Identifier: GPL-3.0-or-later # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -13,32 +16,50 @@ # (at your option) any later version. # # This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of +# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# +LOG=${LOG:-/etc/hosts.deny} -if [ "$1" = "autoconf" ]; then - if [ -r "$LOG" ]; then - echo yes - exit 0 - else - echo no - exit 1 - fi +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x fi -if [ "$1" = "config" ]; then - - echo 'graph_title Hosts denied sshd access in /etc/hosts.deny' - echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel Hosts denied ' - echo 'graph_category system' - echo 'HostsDenied.label Hosts denied' +if [ "${1:-}" = "autoconf" ]; then + if [ -r "${LOG}" ]; then + echo yes + else + echo "no (${LOG} not readable or non-existent)" + fi exit 0 fi -echo HostsDenied.value `cat /etc/hosts.deny | grep sshd | wc -l` +COUNTS=$(sed -n 's/^\([^#]\+\):.*/\1/p' "${LOG}" \ + | sort \ + | uniq -c \ + | sed "s/^.*\s\([0-9]\+\)\s\(.*\)/\2.value \1/" + ) + +if [ "${1:-}" = "config" ]; then + + echo 'graph_title Hosts denied access' + echo "graph_info Hosts denied access in ${LOG}" + echo 'graph_args --base 1000 -l 0' + echo 'graph_vlabel Hosts denied ' + echo 'graph_category security' + # Assume we always have SSH + echo 'sshd.label sshd' + echo 'sshd.draw AREA' + echo "${COUNTS}" \ + | sed '/ssh/d; # skip ssh + s/^\([^\.]\+\)\..*/\1.label \1\n\1.draw STACK/' + if [ "${MUNIN_DIRTYCONFIG:-0}" != 1 ]; then + exit 0 + fi +fi + +echo "${COUNTS}" diff --git a/plugins/network/http__tp_link b/plugins/network/http__tp_link index 54a7eb5f..06fa489b 100755 --- a/plugins/network/http__tp_link +++ b/plugins/network/http__tp_link @@ -1,42 +1,40 @@ #!/usr/bin/env ruby -if $0 =~ /^(?:|.*\/)http_([^_]+)_/ - host = $1 -end +host = Regexp.last_match(1) if $0 =~ %r{^(?:|.*/)http_([^_]+)_} abort "# Error: couldn't understand what I'm supposed to monitor." unless host user = ENV['user'] || 'user' password = ENV['password'] || 'user' -if (ARGV[0] == 'config') +if ARGV[0] == 'config' puts "host_name #{host}" unless host == 'localhost' - puts "multigraph dsl_rate" - puts "graph_title DSL line speed" - puts "graph_args --base 1000 -l 0" - puts "graph_vlabel bps" - puts "graph_category network" - puts "downstream.label downstream" - puts "downstream.type GAUGE" - puts "downstream.min 0" - puts "downstream.cdef downstream,1000,*" - puts "upstream.label upstream" - puts "upstream.type GAUGE" - puts "upstream.min 0" - puts "upstream.cdef upstream,1000,*" + puts 'multigraph dsl_rate' + puts 'graph_title DSL line speed' + puts 'graph_args --base 1000 -l 0' + puts 'graph_vlabel bps' + puts 'graph_category network' + puts 'downstream.label downstream' + puts 'downstream.type GAUGE' + puts 'downstream.min 0' + puts 'downstream.cdef downstream,1000,*' + puts 'upstream.label upstream' + puts 'upstream.type GAUGE' + puts 'upstream.min 0' + puts 'upstream.cdef upstream,1000,*' - puts "multigraph dsl_snr" - puts "graph_title DSL SNR" - puts "graph_args --base 1000 -l 0" - puts "graph_vlabel dB" - puts "graph_scale no" - puts "graph_category network" - puts "downstream.label downstream" - puts "downstream.type GAUGE" - puts "downstream.min 0" - puts "upstream.label upstream" - puts "upstream.type GAUGE" - puts "upstream.min 0" + puts 'multigraph dsl_snr' + puts 'graph_title DSL SNR' + puts 'graph_args --base 1000 -l 0' + puts 'graph_vlabel dB' + puts 'graph_scale no' + puts 'graph_category network' + puts 'downstream.label downstream' + puts 'downstream.type GAUGE' + puts 'downstream.min 0' + puts 'upstream.label upstream' + puts 'upstream.type GAUGE' + puts 'upstream.min 0' exit 0 end @@ -45,7 +43,7 @@ require 'net/http' class TPAdslStats def initialize(host, user, password) - Net::HTTP.start( host ) do |http| + 
Net::HTTP.start(host) do |http| req = Net::HTTP::Get.new('/statsadsl.html') req.basic_auth user, password response = http.request(req) @@ -56,21 +54,21 @@ class TPAdslStats def field_values(label) if @html =~ />#{label}.*?([0-9.]+).*?([0-9.]+)/m - [$1, $2] + [Regexp.last_match(1), Regexp.last_match(2)] else - ['U', 'U'] + %w[U U] end end end stats = TPAdslStats.new(host, user, password) -puts "multigraph dsl_rate" +puts 'multigraph dsl_rate' rate = stats.field_values('Rate') puts "downstream.value #{rate[0]}" puts "upstream.value #{rate[1]}" -puts "multigraph dsl_snr" +puts 'multigraph dsl_snr' snr = stats.field_values('SNR') puts "downstream.value #{snr[0]}" puts "upstream.value #{snr[1]}" diff --git a/plugins/network/if b/plugins/network/if index 06af4154..312034aa 100755 --- a/plugins/network/if +++ b/plugins/network/if @@ -26,7 +26,7 @@ Virtual interface names prefixes with '~' This plugin is configurable environment variables. env.exclude - Removing interfaces from graphs, default empty env.include - Includes interfaces into graphs, default empty -env.if_max_bps - Maximum interface bps. Avialable suffixes: k, M, G, default empty +env.if_max_bps - Maximum interface bps. Available suffixes: k, M, G, default empty env.protexct_peaks - Protect graph peaks, default 'no' env.min_packet_size - Minimal network packet size, default 20 Example: @@ -73,7 +73,7 @@ my $ifpath = '/sys/class/net'; # ----------------------------------- global ----------------- my $interfaces = {}; -# ------------------------ avialable graphs ------------------------- +# ------------------------ available graphs ------------------------- my $graphs = { 'if_bit' => @@ -89,20 +89,20 @@ my $graphs = 'per_if_fields' => [qw(rx_bytes tx_bytes)], 'general_fields' => [qw(rx_bytes tx_bytes)] }, - 'if_packets' => + 'if_packets' => { 'munin' => { 'category' => 'network', 'args' => '--base 1000', - 'title' => ':if: traffic, packets', + 'title' => ':if: traffic, packets', 'vlabel' => 'Packets in (-) / out (+), avg. 
per second', 'info' => 'This graph shows the traffic in packets of the :if:, averaged value per second from last update' }, 'per_if_fields' => [qw(rx_packets tx_packets rx_compressed tx_compressed rx_dropped tx_dropped multicast)], 'general_fields' => [qw(rx_packets tx_packets)] }, - 'if_errors' => + 'if_errors' => { 'munin' => { @@ -120,7 +120,7 @@ my $graphs = { 'munin' => { - 'category' => 'wifi', + 'category' => 'wireless', 'args' => '--base 1000 -u 0', 'title' => ':if: signal and noise levels', 'vlabel' => 'dB', @@ -134,7 +134,7 @@ my $graphs = { 'munin' => { - 'category' => 'wifi', + 'category' => 'wireless', 'args' => '--base 1000', 'title' => ':if: link quality', 'vlabel' => '%', @@ -148,7 +148,7 @@ my $graphs = { 'munin' => { - 'category' => 'wifi', + 'category' => 'wireless', 'args' => '--base 1000', 'title' => ':if: errors', 'vlabel' => 'Errors RX (-) / TX (+)', @@ -178,7 +178,7 @@ my $graphs = { 'category' => 'network', 'args' => '--base 1024', - 'title' => ':if: avgerage packet size', + 'title' => ':if: average packet size', 'vlabel' => 'bytes', 'info' => 'This graph shows average packet size of the :if:' }, @@ -187,7 +187,7 @@ my $graphs = } }; -#-------------------------- avialable fields ------------------------- +#-------------------------- available fields ------------------------- # info: # 'munin' => {} - just copy fields to munin config # 'source' => - field data source @@ -199,12 +199,12 @@ my $graphs = # { # 'type' - types: # 'percent', -# 'full' => +# 'full' => # { # 'source' => 'interface', # 'name' => 'bps' # }, -# 'part' => +# 'part' => # { # 'source' => 'field', # 'name' => 'tx_bytes' @@ -262,7 +262,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'multicast' => + 'multicast' => { 'munin' => { @@ -284,8 +284,8 @@ my $fields = 'difference' => 'per_secund' }, # -------------------------------------------------------------------------- - 'rx_bytes' => - { + 'rx_bytes' => + { 'munin' => { 'type' => 'GAUGE', @@ -335,9 +335,9 @@ my $fields = { 'munin' => { - 'type' => 'GAUGE', - 'draw' => 'LINE1', - 'label' => 'CRC errors' , + 'type' => 'GAUGE', + 'draw' => 'LINE1', + 'label' => 'CRC errors' , 'info' => 'CRC errors' }, 'source' => @@ -774,12 +774,12 @@ my $fields = 'calculated' => { 'type' => 'percent', - 'full' => + 'full' => { 'source' => 'interface', 'name' => 'bps' }, - 'part' => + 'part' => { 'source' => 'field', 'name' => 'rx_bytes' @@ -809,12 +809,12 @@ my $fields = 'calculated' => { 'type' => 'percent', - 'full' => + 'full' => { 'source' => 'interface', 'name' => 'bps' }, - 'part' => + 'part' => { 'source' => 'field', 'name' => 'tx_bytes' @@ -889,7 +889,7 @@ my $fields = 'peack_protect' => 'packet_size_range' }, # -------------------------------------------------------------------------- - 'retries' => + 'retries' => { 'munin' => { @@ -906,7 +906,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'nwid' => + 'nwid' => { 'munin' => { @@ -928,7 +928,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'misc' => + 'misc' => { 'munin' => { @@ -945,7 +945,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'fragment' => + 'fragment' => { 'munin' => { @@ -967,7 +967,7 @@ my $fields = 'difference' => 'count' }, # 
-------------------------------------------------------------------------- - 'beacon' => + 'beacon' => { 'munin' => { @@ -984,7 +984,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'crypt' => + 'crypt' => { 'munin' => { @@ -1006,7 +1006,7 @@ my $fields = 'difference' => 'count' }, # -------------------------------------------------------------------------- - 'rx_wifierr' => + 'rx_wifierr' => { 'munin' => { @@ -1031,7 +1031,7 @@ my $fields = } }, # -------------------------------------------------------------------------- - 'tx_wifierr' => + 'tx_wifierr' => { 'munin' => { @@ -1056,13 +1056,13 @@ my $fields = need_multigraph(); -if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) +if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) { printf("%s\n", -e $ifpath ? "yes" : "no ($ifpath not exists)"); exit (0); } $interfaces = get_interfaces(); -if (defined($ARGV[0]) and ($ARGV[0] eq 'config')) +if (defined($ARGV[0]) and ($ARGV[0] eq 'config')) { print_config(); exit (0); @@ -1124,7 +1124,7 @@ sub get_interfaces for (keys %{$interfaces}) { $interfaces->{$_}{'name'} = sprintf("[%${maxlen}s]", $interfaces->{$_}{'name'}); } } else { die "$ifpath not exists\n"; } - + return $interfaces; } @@ -1169,7 +1169,7 @@ sub get_peak_range $range->{'max'} = $interfaces->{$if}{'bps'}; $range->{'min'} = 0; } - # packets per sec + # packets per sec elsif($fields->{$field}{'peack_protect'} eq 'max_interface_pps' and defined ($interfaces->{$if}{'bps'})) { $range->{'max'} = $interfaces->{$if}{'bps'}/$min_packet_size; @@ -1204,36 +1204,36 @@ sub check_field_avialability #------------------------------ percent ------------------------ if($fields->{$field}{'source'}{'calculated'}{'type'} eq 'percent') { - my %avialable; + my %available; for my $a ('full', 'part') { if($fields->{$field}{'source'}{'calculated'}{$a}{'source'} eq 'interface') { - $avialable{$a} = exists($interfaces->{$if}{$fields->{$field}{'source'}{'calculated'}{$a}{'name'}}); + $available{$a} = exists($interfaces->{$if}{$fields->{$field}{'source'}{'calculated'}{$a}{'name'}}); } elsif($fields->{$field}{'source'}{'calculated'}{$a}{'source'} eq 'field') { - $avialable{$a} = check_field_avialability($if, $fields->{$field}{'source'}{'calculated'}{$a}{'name'}); + $available{$a} = check_field_avialability($if, $fields->{$field}{'source'}{'calculated'}{$a}{'name'}); } } - $fields->{$field}{'avialable'}{$if} = ($avialable{'full'} and $avialable{'part'}); + $fields->{$field}{'available'}{$if} = ($available{'full'} and $available{'part'}); } #------------------------------ division ------------------------ elsif($fields->{$field}{'source'}{'calculated'}{'type'} eq 'division') { - my %avialable; + my %available; for my $a ('dividend', 'divider') { if($fields->{$field}{'source'}{'calculated'}{$a}{'source'} eq 'interface') { - $avialable{$a} = exists($interfaces->{$if}{$fields->{$field}{'source'}{'calculated'}{$a}{'name'}}); + $available{$a} = exists($interfaces->{$if}{$fields->{$field}{'source'}{'calculated'}{$a}{'name'}}); } elsif($fields->{$field}{'source'}{'calculated'}{$a}{'source'} eq 'field') { - $avialable{$a} = check_field_avialability($if, $fields->{$field}{'source'}{'calculated'}{$a}{'name'}); + $available{$a} = check_field_avialability($if, $fields->{$field}{'source'}{'calculated'}{$a}{'name'}); } } - $fields->{$field}{'avialable'}{$if} = ($avialable{'dividend'} and $avialable{'divider'}); + $fields->{$field}{'available'}{$if} = ($available{'dividend'} and $available{'divider'}); } 
#------------------------------ sum ------------------------ elsif($fields->{$field}{'source'}{'calculated'}{'type'} eq 'sum') @@ -1283,10 +1283,10 @@ sub generate_field my ($config, $graph_name, $field, $if, $is_general_graph) = @_[0..4]; return '' unless(check_field_avialability($if, $field)); my $field_graph_name = $is_general_graph ? sprintf("%s_%s", $if, $field) : $field; - for my $option (keys %{$fields->{$field}{'munin'}}) + for my $option (keys %{$fields->{$field}{'munin'}}) { next if exists($config->{$graph_name}{'fields'}{$field_graph_name}{$option}); - $config->{$graph_name}{'fields'}{$field_graph_name}{$option} = replace_if_template($fields->{$field}{'munin'}{$option}, $interfaces->{$if}{'name'}); + $config->{$graph_name}{'fields'}{$field_graph_name}{$option} = replace_if_template($fields->{$field}{'munin'}{$option}, $interfaces->{$if}{'name'}); } if(exists($fields->{$field}{'cdef'})) { @@ -1300,13 +1300,13 @@ sub generate_field { $up_field = $fields->{$down_field}{'negative'}{'name'}; $up_field_name = $is_general_graph ? sprintf("%s_%s", $if, $up_field) : $up_field; - $config->{$graph_name}{'fields'}{$up_field_name}{'label'} = + $config->{$graph_name}{'fields'}{$up_field_name}{'label'} = concat_names($fields->{$down_field}{'munin'}{'label'}, $fields->{$up_field}{'munin'}{'label'}, $is_general_graph ? $if : ''); } elsif($fields->{$down_field}{'negative'}{'type'} eq 'dummy') { $up_field_name = $is_general_graph ? sprintf("%s_%s_dummy", $if, $down_field) : sprintf("%s_dummy", $down_field); - $config->{$graph_name}{'fields'}{$up_field_name}{'label'} = + $config->{$graph_name}{'fields'}{$up_field_name}{'label'} = concat_names($fields->{$down_field}{'munin'}{'label'}, $fields->{$down_field}{'munin'}{'label'}, $is_general_graph ? $if : ''); $config->{$graph_name}{'fields'}{$up_field_name}{'info'} = $fields->{$down_field}{'munin'}{'info'}; } @@ -1344,7 +1344,7 @@ sub generate_graph { for my $field (@{$graphs->{$graph}{'general_fields'}}) { - for my $general_if (keys %{$interfaces}) + for my $general_if (keys %{$interfaces}) { my $res_field = generate_field($config, $graph_name, $field, $general_if, 1); push(@order, $res_field) if $res_field ne ''; @@ -1361,9 +1361,9 @@ sub generate_graph } if(scalar(@order) > 0) { - for my $option (keys %{$graphs->{$graph}{'munin'}}) + for my $option (keys %{$graphs->{$graph}{'munin'}}) { - $config->{$graph_name}{'graph'}{$option} = replace_if_template($graphs->{$graph}{'munin'}{$option}, $is_general_graph ? 'All interfaces' : $interfaces->{$if}{'name'}); + $config->{$graph_name}{'graph'}{$option} = replace_if_template($graphs->{$graph}{'munin'}{$option}, $is_general_graph ? 
'All interfaces' : $interfaces->{$if}{'name'}); } $config->{$graph_name}{'graph'}{'order'} = join(' ', @order); # if scalar(@order) > 1; unless($is_general_graph) @@ -1378,7 +1378,7 @@ sub generate_graphs { my ($config, $graph) = @_[0..1]; generate_graph($config, $graph, '', 1); - for my $if (keys %{$interfaces}) + for my $if (keys %{$interfaces}) { generate_graph($config, $graph, $if, 0); } @@ -1565,9 +1565,9 @@ sub print_values for my $field (@{$graphs->{$graph}{'general_fields'}}) { for my $if (keys %{$interfaces}) { prepare_value($values, $field, sprintf("%s_%s", $if, $field), $graph, $if, $data, $raw_data, $raw_prev_data); } } } - for my $if (keys %{$interfaces}) + for my $if (keys %{$interfaces}) { - for my $graph (keys %{$graphs}) + for my $graph (keys %{$graphs}) { my $graph_name = sprintf("%s.%s", $graph, $if); for my $field (@{$graphs->{$graph}{'per_if_fields'}}) diff --git a/plugins/network/if1sec-c.c b/plugins/network/if1sec-c.c index 47f7842d..6cab7235 100644 --- a/plugins/network/if1sec-c.c +++ b/plugins/network/if1sec-c.c @@ -23,7 +23,7 @@ int fail(char* msg) { } /* Returns the ifname from a /proc/net/dev line - * It will return an inside pointer to line, and modifiy the end with a \0 + * It will return an inside pointer to line, and modify the end with a \0 */ char* get_ifname_from_procstatline(char* line) { char *ifname; @@ -38,8 +38,8 @@ char* get_ifname_from_procstatline(char* line) { int config() { /* Get the number of if */ - int f; - if ( !(f=open(PROC_STAT, O_RDONLY)) ) { + int f = open(PROC_STAT, O_RDONLY); + if ( f == -1 ) { return fail("cannot open " PROC_STAT); } @@ -78,7 +78,7 @@ int config() { "down.graph no" "\n" "down.cdef down,8,*" "\n" "down.min 0" "\n" - + "up.label bps" "\n" "up.type DERIVE" "\n" "up.negative down" "\n" @@ -120,7 +120,12 @@ int acquire() { /* fork ourselves if not asked otherwise */ char* no_fork = getenv("no_fork"); if (! 
no_fork || strcmp("1", no_fork)) { - if (fork()) return; + pid_t child_pid = fork(); + if (child_pid) { + printf("# acquire() launched as PID %d\n", child_pid); + return 0; + } + // we are the child, complete the daemonization /* Close standard IO */ @@ -139,9 +144,15 @@ int acquire() { /* Reading /proc/stat */ int f = open(PROC_STAT, O_RDONLY); + if ( f == -1 ) { + return fail("cannot open " PROC_STAT); + } /* open the spoolfile */ int cache_file = open(cache_filename, O_CREAT | O_APPEND | O_WRONLY, S_IRUSR | S_IWUSR); + if ( cache_file == -1 ) { + return fail("# cannot open cache_file"); + } /* loop each second */ while (1) { @@ -175,10 +186,10 @@ int acquire() { char if_id[64]; uint_fast64_t r_bytes, r_packets, r_errs, r_drop, r_fifo, r_frame, r_compressed, r_multicast; uint_fast64_t t_bytes, t_packets, t_errs, t_drop, t_fifo, t_frame, t_compressed, t_multicast; - sscanf(line, "%s" + sscanf(line, "%s" + " " + "%llu %llu %llu %llu %llu %llu %llu %llu" " " - "%llu %llu %llu %llu %llu %llu %llu %llu" - " " "%llu %llu %llu %llu %llu %llu %llu %llu" , if_id , &r_bytes, &r_packets, &r_errs, &r_drop, &r_fifo, &r_frame, &r_compressed, &r_multicast @@ -189,11 +200,11 @@ int acquire() { if_id[strlen(if_id) - 1] = '\0'; char out_buffer[1024]; - sprintf(out_buffer, + sprintf(out_buffer, "multigraph if_%s_1sec" "\n" "up.value %ld:%llu" "\n" "down.value %ld:%llu" "\n" - , if_id + , if_id , epoch, r_bytes , epoch, t_bytes ); @@ -213,6 +224,9 @@ int acquire() { int fetch() { FILE* cache_file = fopen(cache_filename, "r+"); + if ( !cache_file ) { + return acquire(); + } /* lock */ flock(fileno(cache_file), LOCK_EX); @@ -262,7 +276,7 @@ int main(int argc, char **argv) { } /***** DEMO - + /proc/net/dev sample Inter-| Receive | Transmit diff --git a/plugins/network/if_uptime b/plugins/network/if_uptime index c5c51d2a..9d42565d 100755 --- a/plugins/network/if_uptime +++ b/plugins/network/if_uptime @@ -19,7 +19,7 @@ # # Plugin | Used | Suggestions # ------ | ---- | ----------- -# if_uptime | yes | +# if_uptime | yes | # # 7. Restart munin: "/etc/init.d/munin-node restart" # 8. Hold on for 5 minutes at most and watch the graph appear. @@ -117,15 +117,13 @@ $current_uptime EOF } -# Munin routines +# Munin routines case "$1" in autoconf) if [[ $(ifconfig &> /dev/null; echo "$?") == 0 ]]; then echo yes - exit 0 else echo "no (ifconfig doesn't work out)" - exit 1 fi exit 0 ;; @@ -146,7 +144,7 @@ EOM done exit 0 ;; - *) + *) # Print data for Munin for (( i=0; i<"${#INTERFACES[*]}"; i++ )) do diff --git a/plugins/network/ifem_ b/plugins/network/ifem_ index 89d42bd2..13a0c60d 100755 --- a/plugins/network/ifem_ +++ b/plugins/network/ifem_ @@ -38,7 +38,7 @@ fi if [ "$1" = "config" ]; then - echo "graph_order rbytes obytes" + echo "graph_order rbytes obytes" echo "graph_title $INTERFACE traffic" echo 'graph_args --base 1000' echo 'graph_vlabel bits per ${graph_period} in (-) / out (+)' diff --git a/plugins/network/interfaces_linux_multi b/plugins/network/interfaces_linux_multi index ad158600..d9515725 100755 --- a/plugins/network/interfaces_linux_multi +++ b/plugins/network/interfaces_linux_multi @@ -2,18 +2,18 @@ ######################################################################## # Copyright (c) 2012, Adrien Urban # All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are -# met: -# +# met: +# # 1. 
Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. +# notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the -# distribution. -# +# distribution. +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -102,7 +102,6 @@ if (!defined $ENV{MUNIN_PLUGSTATE}) { sub pidfile() { "$ENV{MUNIN_PLUGSTATE}/munin.$plugin.pid" } sub cachefile() { "$ENV{MUNIN_PLUGSTATE}/munin.$plugin.cache" } -sub graph_section() { "system:network" } sub graph_name() { "interfaces" } #sub graph_title() { "interfaces" } #sub graph_title_all() { "Overall CPU usage" } @@ -327,7 +326,7 @@ sub show_config() } print </dev/null 2>/dev/null if [ $? -gt 0 ]; then echo "no (could not run iptables-save as user `whoami`)" - exit 1 else echo yes - exit 0 fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then @@ -65,7 +63,7 @@ if [ "$1" = "config" ]; then echo 'in.label received' echo 'in.type DERIVE' echo 'in.min 0' - echo 'in.cdef in,8,*' + echo 'in.cdef in,8,*' exit 0 fi; diff --git a/plugins/network/ipfwnat_ b/plugins/network/ipfwnat_ index 3708729e..79056c9d 100755 --- a/plugins/network/ipfwnat_ +++ b/plugins/network/ipfwnat_ @@ -17,7 +17,7 @@ # # Furthermore, nat configuration must contain "log" option. E.g. # -# ipfw nat 123 config ip 198.76.28.4 log +# ipfw nat 123 config ip 198.76.28.4 log # # This plugin is based on the if_ plugin. # @@ -34,18 +34,18 @@ ipfw="/sbin/ipfw" if [ "$1" = "autoconf" ]; then if [ ! -x $ipfw ]; then echo "no ($ipfw not found)" - exit 1 + exit 0 fi err=$($ipfw nat show config 2>&1) if [ $? -ne 0 ]; then echo "no ($err)" - exit 1 + exit 0 fi echo "yes" exit 0 -fi +fi if [ "$1" = "suggest" ]; then $ipfw nat show config 2> /dev/null | /usr/bin/awk '/nat [0-9]+ .+ log/{print $3;}' diff --git a/plugins/network/ipset b/plugins/network/ipset new file mode 100755 index 00000000..8f8adacc --- /dev/null +++ b/plugins/network/ipset @@ -0,0 +1,80 @@ +#!/bin/sh +# -*- sh -*- + +: <<=cut + +=head1 NAME + +ipset - Graph number of members of netfilter ipsets + +=head1 APPLICABLE SYSTEMS + +Any system with a compatible ipset command. 
+ +=head1 CONFIGURATION + +Ipset has to be run as root: + + [ipset] + user root + +=head1 INTERPRETATION + +This plugin draws number of members for each ipset present in the kernel + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + 0.1 first release + 0.2 added docs, munin best practices + +=head1 BUGS + +None known + +=head1 AUTHOR + +Originally: Tomas Mudrunka 2016-2018 ( github.com/harvie ) + +=head1 LICENSE + +GPLv2 + +=cut + +set -eu + + +get_ipset_list() { + ipset list -n +} + + +if [ "${1:-}" = "autoconf" ]; then + if [ -e /sbin/ipset ] || [ -n "$(which ipset)" ]; then + echo 'yes' + else + echo 'no (ipset binary not present)' + fi + exit 0 +fi + +if [ "${1:-}" = "config" ]; then + echo graph_title Netfilter IPSets + echo graph_category network + echo graph_vlabel Members + echo graph_args --base 1000 --logarithmic --units=si + get_ipset_list | while read -r list; do + echo "$list.label $list" + echo "$list.min 0" + done + [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = 1 ] || exit 0 +fi + +get_ipset_list | while read -r list; do + echo "$list.value $(( $(ipset list "$list" | wc -l) - 7 ))" +done +exit 0 diff --git a/plugins/network/ipt_accounting_ b/plugins/network/ipt_accounting_ index bd5a2281..a22a23c2 100755 --- a/plugins/network/ipt_accounting_ +++ b/plugins/network/ipt_accounting_ @@ -42,22 +42,20 @@ ACC=`basename $0 | sed 's/^ipt_accounting_//g'` if [ "$1" = "autoconf" ]; then if [ -r /proc/net/dev ]; then - iptables -L INPUT -v -n -x >/dev/null 2>/dev/null + iptables -L INPUT -v -n -x -w >/dev/null 2>/dev/null if [ $? -gt 0 ]; then echo "no (could not run iptables as user `whoami`)" - exit 1 else echo yes - exit 0 fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then - iptables -L INPUT -v -x -n 2>/dev/null | sed -n 's/^.*\/\* ACC\-\([a-zA-Z]*\) \*\/.*$/\1/p' + iptables -L INPUT -v -x -n -w 2>/dev/null | sed -n 's/^.*\/\* ACC\-\([a-zA-Z]*\) \*\/.*$/\1/p' exit 0 fi @@ -79,5 +77,5 @@ if [ "$1" = "config" ]; then exit 0 fi; -iptables -L INPUT -v -n -x | grep -m1 "\/\* ACC\-"$ACC" \*\/" | awk "{ print \"in.value \" \$2 }" -iptables -L OUTPUT -v -n -x | grep -m1 "\/\* ACC\-"$ACC" \*\/" | awk "{ print \"out.value \" \$2 }" +iptables -L INPUT -v -n -x -w | grep -m1 "\/\* ACC\-"$ACC" \*\/" | awk "{ print \"in.value \" \$2 }" +iptables -L OUTPUT -v -n -x -w | grep -m1 "\/\* ACC\-"$ACC" \*\/" | awk "{ print \"out.value \" \$2 }" diff --git a/plugins/network/ipt_basic_ b/plugins/network/ipt_basic_ index 6f05638c..7d3019ab 100755 --- a/plugins/network/ipt_basic_ +++ b/plugins/network/ipt_basic_ @@ -22,7 +22,7 @@ # Revisions: # 2006.01.00 - First release. # 2006.11.26 - Use -j RETURN in rules, and sort interfaces -# +# # # Magic markers (optional - used by munin-config and some installation # scripts): @@ -45,18 +45,16 @@ iptables='/sbin/iptables' if [ "$1" = "autoconf" ]; then if [ -r /proc/net/dev ]; then - RES=`$iptables -L $TNAME -nvx 2>&1 >/dev/null` + RES=`$iptables -L $TNAME -nvx -w 2>&1 >/dev/null` if [ $? 
-gt 0 ]; then echo "no (could not run iptables as user `whoami`; $RES)" - exit 1 else echo yes - exit 0 fi else echo "no (/proc/net/dev not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then @@ -79,7 +77,7 @@ if [ "$1" = "initialise" ]; then exit 1 fi -IFACES=`$iptables -L munin_node -nvx | awk '$6 ~ /(eth|ppp)[0-9]/ { if (done[$6]!=1) {print $6; done[$6]=1;}}'` +IFACES=`$iptables -L munin_node -nvx -w | awk '$6 ~ /(eth|ppp)[0-9]/ { if (done[$6]!=1) {print $6; done[$6]=1;}}'` if [ "$1" = "config" ]; then @@ -94,7 +92,7 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000' echo 'graph_category network' echo 'graph_info This graph shows the traffic of the interfaces in bits per second, and should be precise above 50Mbps as well. All forwarded traffic is measured in the incoming counter of the given interface.' - + for iface in $IFACES; do echo "$iface.label ${iface}_received" echo "$iface.type DERIVE" @@ -108,7 +106,7 @@ if [ "$1" = "config" ]; then fi; if [ "$TYPE" = "pkts" ]; then - $iptables -L munin_node -nvx | egrep "eth|ppp" | awk "{ print \$6 \".value \" \$1 }" + $iptables -L munin_node -nvx -w | egrep "eth|ppp" | awk "{ print \$6 \".value \" \$1 }" else - $iptables -L munin_node -nvx | egrep "eth|ppp" | awk "{ print \$6 \".value \" \$2 }" + $iptables -L munin_node -nvx -w | egrep "eth|ppp" | awk "{ print \$6 \".value \" \$2 }" fi diff --git a/plugins/network/ldap_connections b/plugins/network/ldap_connections index fc24b18b..68167959 100755 --- a/plugins/network/ldap_connections +++ b/plugins/network/ldap_connections @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the number of open connexions to LDAP # # $Log: ldap_connections,v $ @@ -13,7 +13,7 @@ # Modif pour OpenBSD # # Revision 1.6 2007/03/02 07:52:52 cvserver -# pas LISTEN pour les IPs utilises (en cas de *:389) +# pas LISTEN pour les IPs utilisées (en cas de *:389) # # Revision 1.5 2007/03/01 16:06:53 cvserver # corrections: @@ -34,14 +34,14 @@ # # # plugin-conf.d/-options: -# +# # netstat -- path to netstat executable # ports -- ldap ports used (389 and 636) # only used ones are graphed # socket -- ldapi socket (default: /var/run/openldap/ldapi) # # Parameters: -# +# # config (required) # autoconf (optional - used by munin-config) # @@ -99,12 +99,11 @@ if [ "$1" = "autoconf" ]; then done if [ -n "$ONE_LISTENING" ]; then - echo yes - exit 0 + echo "yes" else - echo no '(no slapd listening on '$PORTS')' - exit 1 + echo "no (no slapd listening on $PORTS)" fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/network/linux_if/linux_if_bonding.png b/plugins/network/linux_if/example-graphs/linux_if-1.png similarity index 100% rename from plugins/network/linux_if/linux_if_bonding.png rename to plugins/network/linux_if/example-graphs/linux_if-1.png diff --git a/plugins/network/linux_if/linux_if_vlans.png b/plugins/network/linux_if/example-graphs/linux_if-2.png similarity index 100% rename from plugins/network/linux_if/linux_if_vlans.png rename to plugins/network/linux_if/example-graphs/linux_if-2.png diff --git a/plugins/network/linux_if/linux_if b/plugins/network/linux_if/linux_if index e9d6b89e..19944008 100755 --- a/plugins/network/linux_if/linux_if +++ b/plugins/network/linux_if/linux_if @@ -17,12 +17,12 @@ plugin configuration: # run plugin as root (required if you have VLAN sub-interfaces) user = root - # comma separated list of intreface patterns to exclude from monitoring + # comma separated list of interface patterns to exclude from monitoring # default: lo # example: env.exclude = lo,vnet* 
- # comma separated list of intreface patterns to include in monitoring + # comma separated list of interface patterns to include in monitoring # default: (empty) # example: env.include = br_* @@ -43,7 +43,7 @@ Tested on: RHEL 6.x and clones (with Python 2.6) TODO: * implement 'data loaning' between graphs, removes duplicit measures * add support for bridging -* configurable graph max based on intreface speed +* configurable graph max based on interface speed MUNIN MAGIC MARKER #%# family=manual diff --git a/plugins/network/mtr100_ b/plugins/network/mtr100_ index 44ed34f3..b84d85f9 100755 --- a/plugins/network/mtr100_ +++ b/plugins/network/mtr100_ @@ -34,17 +34,15 @@ totrace=`basename $0 | sed 's/^mtr100_//g'` if [ "$1" = "autoconf" ]; then if ( mtr -nrc 1 localhost 2>/dev/null >/dev/null ); then echo yes - exit 0 else if [ $? -eq 127 ] then echo "no (mtr program not found - install the mtr(-tiny) package)" - exit 1 else echo no - exit 1 fi fi + exit 0 exit 0 fi diff --git a/plugins/network/multibandwidth b/plugins/network/multibandwidth old mode 100644 new mode 100755 index 4f4f7298..e1a763ac --- a/plugins/network/multibandwidth +++ b/plugins/network/multibandwidth @@ -6,11 +6,11 @@ =head1 NAME -multibandwidth - Plugin to monitor the bandwidth between localhost and serveral hosts. +multibandwidth - Plugin to monitor the bandwidth between localhost and several hosts. =head1 APPLICABLE SYSTEMS -All systems with “bash”, and “munin” +All systems with "bing" installed. =head1 REQUIREMENTS @@ -20,26 +20,33 @@ You can install bing by using (Ubuntu/Debian): apt-get install bing =head1 CONFIGURATION -The following is the default configuration +The following example configuration shows all settings. Only "hosts" is required for +minimal configuration. [multibandwidth] user root env.hosts example.org example2.org example3.org -env.samples 10 +env.samples 15 env.small_packet_size 44 env.big_packet_size 108 +env.max_valid_bps 15728640 -- env.hosts explanation: hostname or IP of the hosts to calculate the bandwidth. +- env.hosts: space separated list of hostnames or IPs of the hosts to calculate the bandwidth. + This setting is required. -- env.samples explanation: Reset stats after sending samples ECHO_REQUEST packets. +- env.samples: Reset stats after sending this number of ECHO_REQUEST packets. + Defaults to 15 samples. -- env.small_packet_size explanation: Specifies the number of data bytes to be sent in the small +- env.small_packet_size: Specifies the number of data bytes to be sent in the small packets. The default and minimum value is 44. -- env.big_packet_size explanation: Specifies the number of data bytes to be sent in the big +- env.big_packet_size: Specifies the number of data bytes to be sent in the big packets. The default is 108. The size should be chosen so that big packet roundtrip times are long enough to be accurately measured. +- env.max_valid_bps: bing have some random spikes. This variable is used to indicate + the maximum value of mbps that can be recorded (in bps). + Defaults to the empty string (no validity check). 
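+
+As a rough illustration (the numbers are made up), the suffix conversion and the
+validity check applied further below behave like this:
+
+    SPEED="2.5M"   # taken from bing's "estimated link" output, with everything from "bps" stripped
+    echo "$SPEED" | awk '{ print int($1 * 1024 * 1024) }'   # prints 2621440, which is kept
+    # a spike such as "900M" would convert to 943718400 and exceed the
+    # max_valid_bps of 15728640 shown above, so it would be reported as "U" instead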
=head1 MAGIC MARKERS @@ -61,52 +68,63 @@ GPLv2 =cut +hosts=${hosts:-} +samples=${samples:-15} +small_packet_size=${small_packet_size:-44} +big_packet_size=${big_packet_size:-108} +max_valid_bps=${max_valid_bps:-15728640} + case $1 in config) - echo graph_title MultiBandwidth - echo 'graph_vlabel bps' - echo 'graph_args --base 1024 -l 0' - echo 'graph_scale yes' - echo 'graph_category network' - echo 'graph_info This graph shows the bandwidth between localhost and serveral hosts' - for host in $hosts; do - fieldname="host_$(clean_fieldname "$host")" - echo "$fieldname.label $host" - echo "$fieldname.draw LINE2" - echo "$fieldname.info Bandwidth statistics for $host" - done - exit 0;; + echo graph_title MultiBandwidth + echo 'graph_vlabel bps' + echo 'graph_args --base 1024 -l 0' + echo 'graph_scale yes' + echo 'graph_category network' + echo 'graph_info This graph shows the bandwidth between localhost and several hosts' + for host in $hosts; do + fieldname="host_$(clean_fieldname "$host")" + echo "$fieldname.label $host" + echo "$fieldname.draw LINE2" + echo "$fieldname.info Bandwidth statistics for $host" + done + exit 0 + ;; autoconf) - if hash bing 2>/dev/null; then - echo 'yes' - exit 0; - else - echo 'no (bing not installed)' - exit 0; - fi - + if command -v bing 2>/dev/null; then + echo 'yes' + else + echo 'no (bing not installed)' + fi + exit 0 + ;; esac -#Calculating the bandwidth +# Calculating the bandwidth for host in $hosts; do fieldname="host_$(clean_fieldname "$host")" - printf "$fieldname.value "; SPEED=$(timeout 6 bing localhost "$host" -n -c 1 -e "$samples" -s "$small_packet_size" -S "$big_packet_size" 2>/dev/null \ - |grep "estimated link" -A 2 \ + | grep "estimated link" -A 2 \ | grep bps \ | awk '{print $2}' \ | cut -d "b" -f1) - if (echo "$SPEED" | grep -q "M"); then - echo "$SPEED" | awk '{a+=$1} END{print a*1000000}' - elif (echo "$SPEED" | grep -q "K"); then - echo "$SPEED" | awk '{a+=$1} END{print a*1000}' - elif (echo "$SPEED" | grep -q "G"); then - echo "$SPEED" | awk '{a+=$1} END{print a*1000000000}' + if echo "$SPEED" | grep -q "M"; then + RATE=$(echo "$SPEED" | awk '{ print int($1 * 1024 * 1024); }') + elif echo "$SPEED" | grep -q "K"; then + RATE=$(echo "$SPEED" | awk '{ print int($1 * 1024); }') + elif echo "$SPEED" | grep -q "G"; then + RATE=$(echo "$SPEED" | awk '{ print int($1 * 1024 * 1024 * 1024); }') else + RATE="U" echo "Error: no data (timeout)" >&2 fi + if [ -n "$max_valid_bps" ] && [ "$RATE" -gt "$max_valid_bps" ]; then + # the value is outside of the allowed range; discard it + RATE="U" + fi + echo "${fieldname}.value $RATE" done diff --git a/plugins/other/multipng_async b/plugins/network/multiping_async similarity index 100% rename from plugins/other/multipng_async rename to plugins/network/multiping_async diff --git a/plugins/network/netatalk b/plugins/network/netatalk index 9ac211eb..86b59da7 100755 --- a/plugins/network/netatalk +++ b/plugins/network/netatalk @@ -124,7 +124,7 @@ fi echo "proc.value" $(ps ax --no-headers -o command | grep "^$afpdPath" | wc -l) # Connected users (user): -# We will ignore root (having UID=0 it's line will be first) (assomption done: there will have only one line corresponding to root in `ps` output) +# We will ignore root (having UID=0 it's line will be first) (assumption done: there will have only one line corresponding to root in `ps` output) connectedUsers=$(ps anx --no-headers -o uid,command | sed 's/^ *//g' | grep "^[0-9]* $afpdPath" | sort -n | tail -n +2 | awk '{print $1}') echo "user.value" `echo 
$connectedUsers | wc -w` @@ -144,7 +144,7 @@ for shareName in `cat $defaultServer_volumesFile | grep "^[^#:]" | grep -oP "^([ for currentUid in $connectedUsers; do # For each connected users currentUserHomeDir=`getent passwd $currentUid | cut -d ':' -f6` # Fetch it's the home directory currentUserHomeDir=`readlink -f "$currentUserHomeDir"` # We want the realpath (resolves symbolic links and normalize the path) - + #FIX: We use pipe `lsof` outputs to `echo -e` with `xargs` because lsof "displays only printable ASCII characters" (cf. http://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/00FAQ #14.5) # Then if a share with non-ASCII characters in it's path were to be opened, lsof would return them on \xNN form and grep wouldn't match: `echo -e /the/path` fixes this [ `$baseLsofCommand -F n | xargs -0 echo -e | grep "^n$currentUserHomeDir" | wc -l` -gt 0 ] && let openShares++ # If found in lsof output: increment the openShares counter diff --git a/plugins/network/netatalk3 b/plugins/network/netatalk3 index fc766804..96f0816c 100755 --- a/plugins/network/netatalk3 +++ b/plugins/network/netatalk3 @@ -48,7 +48,7 @@ function count_connected_users () { local afpd_bin_path=$1 afpd_procs=$(ps anx --no-headers -o uid,command |grep -E "\w+\d+*\s${afpd_bin_path}" |wc -l) # one of those processes will be always from root user, so it's not being - # used to externaly connect volumes, therefor being disconsider. + # used to externally connect volumes, therefore being ignored. echo $(echo "${afpd_procs} - 1" |bc) } @@ -119,7 +119,7 @@ EOM #### Boilerplates #################################################### # Locating AFP related files: # * Binary: Using the first result of "which" command; - # * Config: Using "afpd" with parameters to pring configuration file + # * Config: Using "afpd" with parameters to print configuration file # location; # diff --git a/plugins/network/netstat_s_ b/plugins/network/netstat_s_ new file mode 100755 index 00000000..2aa209de --- /dev/null +++ b/plugins/network/netstat_s_ @@ -0,0 +1,430 @@ +#!/usr/bin/env ruby + +=begin + +netstat_s revision 6 (Nov 2013) + +This plugin shows various statistics from 'netstat -s' + +Required privileges: none + +OS: + Supposed: BSD, Linux (only a few items, see netstat_multi for more) + Tested: FreeBSD: 8.2, 8.3, 9.1 + Linux : Debian 6 (kernel 2.6.32), Arch (kernel 3.11.6), CentOS 6 + +Author: Artem Sheremet + +#%# family=auto +#%# capabilities=autoconf suggest + +=end + +# original filename +PLUGIN_NAME = 'netstat_s_'.freeze + +$os = `uname -s`.strip.downcase.to_sym +$debug_mode = ARGV.first == 'debug' + +class String + def escape + gsub(/[^\w]/, '_') + end + + unless method_defined? :start_with? + def start_with?(str) + self[0...str.size] == str + end + end + + unless method_defined? 
:lines + def lines + split($/).to_enum + end + end +end + +class Graph + def initialize(name, protocol, parse_expr) + @name = name + @protocol = protocol + @parse_expr = parse_expr + end + + def config + config_options = [] + + # first, build a list of multigraphs (one graph per unit) + # Hash key is unit, and the value is array of labels + multigraphs = {} + @parse_expr.each do |_expr, descr| + next unless descr # no label - skip this entry + + descr.each do |entry| + labels_array = (multigraphs[entry[0]] ||= []) + labels_array.push [entry[1], entry[2]] + end + end + + multigraphs.each_pair do |unit, labels_and_negatives| + # now just add options to the config + + config_options.concat [ + "multigraph #{name(unit)}", + "graph_title Netstat: #{@protocol}: #{@name}#{" (#{unit})" if multigraphs.size > 1}", + 'graph_category network', + "graph_order #{labels_and_negatives.map { |label, _negative| label.escape }.join(' ')}" + ] + + config_options.push 'graph_args --base 1024' if unit == :bytes + has_negatives = false + + labels_and_negatives.each do |label, negative| + label_esc = label.escape + has_negatives = true unless negative.nil? + + if negative == true + # the value has no opposite and is negative + config_options.concat [ + "#{label_esc}.graph no", + "#{label_esc}_neg.type DERIVE", + "#{label_esc}_neg.min 0", + "#{label_esc}_neg.draw LINE", + "#{label_esc}_neg.label #{label}", + "#{label_esc}_neg.negative #{label_esc}" + ] + else + config_options.concat [ + "#{label_esc}.type DERIVE", + "#{label_esc}.min 0", + "#{label_esc}.draw LINE", + "#{label_esc}.label #{label}" + ] + end + + if negative == false + # the value has no opposite and is positive + config_options.concat [ + "#{label_esc}_neg.graph off", + "#{label_esc}.negative #{label_esc}_neg" + ] + elsif negative + negative_esc = negative.escape + config_options.concat [ + "#{label_esc}.negative #{negative_esc}", + "#{negative_esc}.graph no" + ] + end + end + + config_options.push "graph_vlabel per second#{' in (-) / out (+)' if has_negatives}" + end + + config_options + end + + def fetch(data) + output_data = [] + + # first build a set of multigraphs, one per unit. + # Hash key is unit, and the value is a hash of 'escaped label' => 'value' + multigraphs = {} + @parse_expr.each do |expr, descr| + next unless descr # no label - skip this entry + + index = data.index { |line| line =~ expr } + if index + data.delete_at index + $~[1..-1].zip(descr).each do |value, info| + unit, label = info + (multigraphs[unit] ||= {})[label.escape] = value + end + else + warn "no line found for #{expr}, #{descr}" if $debug_mode + end + end + + multigraphs.each_pair do |unit, values| + output_data.push "multigraph #{name(unit)}" + output_data += values.map { |label, value| "#{label}.value #{value}" } + end + + output_data + end + + def name(unit) + "#{PLUGIN_NAME}#{@protocol}_#{@name.escape}_#{unit}" + end +end + +def graphs_for(protocol) + case protocol + # Order of the graps in each section is important for parsing. + # At the same time, it is not important for munin, so we are OK placing it in parsing order here. + when 'tcp' + if $os == :linux + [ + Graph.new('sent', protocol, [ + # Description of the elements of arrays below: + # 0: regexp to parse the line + # 1: Array for each matching group in the regular expression. + # 0: unit name + # 1: label + # 2 (optional): negative label + # It could be reasonable to add more elements as warning and critical values. 
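+                      # For example, the three-element form used by the ARP graph further
+                      # down, [:packets, 'requests', 'requests received'], tells config()
+                      # to draw 'requests received' as the negative (downward) counterpart
+                      # of 'requests' on the same graph.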
+ + [/(\d+) segments send out$/, [[:segments, 'total']]], + [/(\d+) segments retransmitted$/, [[:segments, 'retransmitted']]] + ]), + + Graph.new('received', protocol, [ + [/(\d+) segments received$/, [[:segments, 'total']]], + [/(\d+) bad segments received.$/, [[:segments, 'bad']]] + ]), + + Graph.new('connections', protocol, [ + [/(\d+) active connections openings$/, [[:connections, 'active openings']]], + [/(\d+) passive connection openings$/, [[:connections, 'passive openings']]], + [/(\d+) failed connection attempts$/, [[:connections, 'failed attempts']]], + [/(\d+) connection resets received$/, [[:connections, 'RST received']]], + [/(\d+) connections established$/, [[:connections, 'established']]], + [/(\d+) resets sent$/, [[:connections, 'RST sent']]] + ]), + + Graph.new('timeouts', protocol, [ + [/(\d+) timeouts after SACK recovery$/, [[:segments, 'after SACK recovery']]], + [/(\d+) other TCP timeouts$/, [[:segments, 'other TCP']]], + [/(\d+) timeouts in loss state$/, [[:segments, 'in a loss state']]] + ]) + ] + else + [ + Graph.new('sent', protocol, [ + [/(\d+) packets sent$/, [[:packets, 'total']]], + [/(\d+) data packets \((\d+) bytes\)$/, [[:packets, 'data'], [:bytes, 'data']]], + [/(\d+) data packets \((\d+) bytes\) retransmitted$/, [[:packets, 'retransmitted'], [:bytes, 'retransmitted']]], + [/(\d+) data packets unnecessarily retransmitted$/, [[:packets, 'unnecessarily retransmitted']]], + [/(\d+) resends initiated by MTU discovery$/, [[:packets, 'resends initiated by MTU discovery']]], + [/(\d+) ack-only packets \((\d+) delayed\)$/, [[:packets, 'ack-only'], [:packets, 'ack-only delayed']]], + [/(\d+) URG only packets$/, [[:packets, 'URG only']]], + [/(\d+) window probe packets$/, [[:packets, 'window probe']]], + [/(\d+) window update packets$/, [[:packets, 'window update']]], + [/(\d+) control packets$/, [[:packets, 'control']]] + ]), + + Graph.new('received', protocol, [ + [/(\d+) packets received$/, [[:packets, 'total']]], + [/(\d+) acks \(for (\d+) bytes\)$/, [[:packets, 'acks'], [:bytes, 'acks']]], + [/(\d+) duplicate acks$/, [[:packets, 'duplicate acks']]], + [/(\d+) acks for unsent data$/, [[:packets, 'acks for unsent data']]], + [/(\d+) packets \((\d+) bytes\) received in-sequence$/, [[:packets, 'in-sequence'], [:bytes, 'in-sequence']]], + [/(\d+) completely duplicate packets \((\d+) bytes\)$/, [[:packets, 'completely duplicate'], [:bytes, 'completely duplicate']]], + [/(\d+) old duplicate packets$/, [[:packets, 'old duplicate']]], + [/(\d+) packets with some dup\. data \((\d+) bytes duped\)$/, [[:packets, 'some dup. 
data'], [:bytes, 'partial dups']]], + [/(\d+) out-of-order packets \((\d+) bytes\)$/, [[:packets, 'out-of-order'], [:bytes, 'out-of-order']]], + [/(\d+) packets \((\d+) bytes\) of data after window$/, [[:packets, 'data after window'], [:bytes, 'data after window']]], + [/(\d+) window probes$/, [[:packets, 'window probes']]], + [/(\d+) window update packets$/, [[:packets, 'window update']]], + [/(\d+) packets received after close$/, [[:packets, 'after close']]], + [/(\d+) discarded for bad checksums$/, [[:packets, 'bad checksums']]], + [/(\d+) discarded for bad header offset fields?$/, [[:packets, 'bad header offset flds']]], + [/(\d+) discarded because packet too short$/, [[:packets, 'too short']]], + [/(\d+) discarded due to memory problems$/, [[:packets, 'discarded: memory problems']]], + [/(\d+) ignored RSTs in the windows$/, [[:packets, 'ignored RSTs in windows']]], + [/(\d+) segments updated rtt \(of (\d+) attempts\)$/, [[:packets, 'RTT: updated'], [:packets, 'RTT: attempts to update']]] + ]), + + Graph.new('connections', protocol, [ + [/(\d+) connection requests$/, [[:connections, 'requests']]], + [/(\d+) connection accepts$/, [[:connections, 'accepts']]], + [/(\d+) bad connection attempts$/, [[:connections, 'bad attempts']]], + [/(\d+) listen queue overflows$/, [[:connections, 'listen queue overflows']]], + [/(\d+) connections established \(including accepts\)$/, [[:connections, 'established']]], + [/(\d+) connections closed \(including (\d+) drops\)$/, [[:connections, 'closed'], [:connections, 'dropped']]], + [/(\d+) connections updated cached RTT on close$/, [[:connections, 'closed & upd cached RTT']]], + [/(\d+) connections updated cached RTT variance on close$/, [[:connections, 'closed & upd cached RTT variance']]], + [/(\d+) connections updated cached ssthresh on close$/, [[:connections, 'closed & upd cached ssthresh']]], + [/(\d+) embryonic connections dropped$/, [[:connections, 'embryonic dropped']]] + ]), + + Graph.new('timeouts', protocol, [ + [/(\d+) retransmit timeouts$/, [[:connections, 'retransmit']]], + [/(\d+) connections dropped by rexmit timeout$/, [[:connections, 'retransmit: dropped']]], + [/(\d+) persist timeouts$/, [[:connections, 'persist']]], + [/(\d+) connections dropped by persist timeout$/, [[:connections, 'persist: dropped']]], + [/(\d+) Connections \(fin_wait_2\) dropped because of timeout$/, [[:connections, 'fin_wait_2: dropped']]], + [/(\d+) keepalive timeouts$/, [[:connections, 'keepalive']]], + [/(\d+) keepalive probes sent$/, [[:connections, 'keepalive: probes sent']]], + [/(\d+) connections dropped by keepalive$/, [[:connections, 'keepalive: dropped']]] + ]), + + Graph.new('correct predictions', protocol, [ + [/(\d+) correct ACK header predictions$/, [[:predictions, 'ACK header']]], + [/(\d+) correct data packet header predictions$/, [[:predictions, 'data packet header']]] + ]), + + Graph.new('SYN', protocol, [ + [/(\d+) syncache entries added$/, [[:entries, 'cache added']]], + [/(\d+) cookies sent$/, [[:entries, 'cookies sent']]], + [/(\d+) cookies received$/, [[:entries, 'cookies received']]], + [/(\d+) retransmitted$/, [[:entries, 'retransmitted']]], + [/(\d+) dupsyn$/, [[:entries, 'duplicates']]], + [/(\d+) dropped$/, [[:entries, 'dropped']]], + [/(\d+) completed$/, [[:entries, 'completed']]], + [/(\d+) bucket overflow$/, [[:entries, 'bucket overflow']]], + [/(\d+) cache overflow$/, [[:entries, 'cache overflow']]], + [/(\d+) reset$/, [[:entries, 'reset']]], + [/(\d+) stale$/, [[:entries, 'stale']]], + [/(\d+) aborted$/, [[:entries, 'aborted']]], 
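+                    # Note: /(\d+) bucket overflow$/ appears twice in this list. fetch()
+                    # consumes each matched line (data.delete_at), so - assuming netstat
+                    # prints the syncache section before the hostcache section - the first
+                    # entry picks up the syncache counter and the later one the hostcache
+                    # counter. This is why the parsing order noted above matters.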
+ [/(\d+) badack$/, [[:entries, 'bad ACK']]], + [/(\d+) unreach$/, [[:entries, 'unreachable']]], + [/(\d+) zone failures$/, [[:entries, 'zone failures']]], + [/(\d+) hostcache entries added$/, [[:entries, 'hostcache added']]], + [/(\d+) bucket overflow$/, [[:entries, 'hostcache overflow']]] + ]), + + Graph.new('SACK', protocol, [ + [/(\d+) SACK recovery episodes$/, [[:packets, 'recovery episodes']]], + [/(\d+) segment rexmits in SACK recovery episodes$/, [[:packets, 'segment rexmits']]], + [/(\d+) byte rexmits in SACK recovery episodes$/, [[:bytes, 'bytes rexmitted']]], + [/(\d+) SACK options \(SACK blocks\) received$/, [[:packets, 'options blocks rcvd']]], + [/(\d+) SACK options \(SACK blocks\) sent$/, [[:packets, 'options blocks sent']]], + [/(\d+) SACK scoreboard overflow$/, [[:packets, 'scoreboard overflow']]] + ]), + + Graph.new('ECN', protocol, [ + [/(\d+) packets with ECN CE bit set$/, [[:packets, 'CE bit']]], + [/(\d+) packets with ECN ECT\(0\) bit set$/, [[:packets, 'ECT(0) bit']]], + [/(\d+) packets with ECN ECT\(1\) bit set$/, [[:packets, 'ECT(1) bit']]], + [/(\d+) successful ECN handshakes$/, [[:packets, 'successful handshakes']]], + [/(\d+) times ECN reduced the congestion window$/, [[:packets, 'congestion window reduced']]] + ]) + ] +end + when 'udp' + if $os == :linux + [] + else + [ + Graph.new('received', protocol, [ + [/(\d+) datagrams received$/, [[:packets, 'total']]], + [/(\d+) with incomplete header$/, [[:packets, 'incomplete header']]], + [/(\d+) with bad data length field$/, [[:packets, 'bad data length field']]], + [/(\d+) with bad checksum$/, [[:packets, 'bad checksum']]], + [/(\d+) with no checksum$/, [[:packets, 'no checksum']]], + [/(\d+) dropped due to no socket$/, [[:packets, 'dropped: no socket']]], + [%r{(\d+) broadcast/multicast datagrams undelivered$}, [[:packets, '*cast undelivered']]], + [/(\d+) dropped due to full socket buffers$/, [[:packets, 'dropped: no buffers']]], + [/(\d+) not for hashed pcb$/, [[:packets, 'not for hashed pcb']]], + [/(\d+) delivered$/, [[:packets, 'delivered']]] + ]), + + Graph.new('sent', protocol, [ + [/(\d+) datagrams output$/, [[:packets, 'total']]], + [/(\d+) times multicast source filter matched$/, [[:packets, 'multicast src filter match']]] + ]) + ] +end + when 'ip' + if $os == :linux + [] + else + [ + Graph.new('received', protocol, [ + [/(\d+) total packets received$/, [[:packets, 'total']]], + [/(\d+) bad header checksums$/, [[:packets, 'bad header checksum']]], + [/(\d+) with size smaller than minimum$/, [[:packets, 'size smaller than min']]], + [/(\d+) with data size < data length$/, [[:packets, 'data size < data length']]], + [/(\d+) with ip length > max ip packet size$/, [[:packets, 'ip length > max ip packet sz']]], + [/(\d+) with header length < data size$/, [[:packets, 'header length < data size']]], + [/(\d+) with data length < header length$/, [[:packets, 'data length < header length']]], + [/(\d+) with bad options$/, [[:packets, 'bad options']]], + [/(\d+) with incorrect version number$/, [[:packets, 'incorrect version']]], + [/(\d+) fragments? received$/, [[:packets, 'fragments']]], + [/(\d+) fragments? dropped \(dup or out of space\)$/, [[:packets, 'frags dropped: dup/out of spc']]], + [/(\d+) fragments? dropped after timeout$/, [[:packets, 'frags dropped: timeout']]], + [/(\d+) packets? reassembled ok$/, [[:packets, 'reassembled ok']]], + [/(\d+) packets? for this host$/, [[:packets, 'for this host']]], + [%r{(\d+) packets? 
for unknown/unsupported protocol$}, [[:packets, 'for unknown/unsup protocol']]], + [/(\d+) packets? forwarded \((\d+) packets fast forwarded\)$/, [[:packets, 'forwarded'], [:packets, 'fast forwarded']]], + [/(\d+) packets? not forwardable$/, [[:packets, 'not forwardable']]], + [/(\d+) packets? received for unknown multicast group$/, [[:packets, 'unknown multicast grp']]] + ]), + + Graph.new('sent', protocol, [ + [/(\d+) packets? sent from this host$/, [[:packets, 'total']]], + [/(\d+) redirects? sent$/, [[:packets, 'redirect']]], + [/(\d+) packets? sent with fabricated ip header$/, [[:packets, 'fabricated IP head']]], + [/(\d+) output packets? dropped due to no bufs, etc\.$/, [[:packets, 'dropped: no bufs, etc']]], + [/(\d+) output packets? discarded due to no route$/, [[:packets, 'discarded: no route']]], + [/(\d+) output datagrams? fragmented$/, [[:packets, 'fragmented']]], + [/(\d+) fragments? created$/, [[:packets, 'fragments created']]], + [/(\d+) datagrams? that can't be fragmented$/, [[:packets, "can't be fragmented"]]], + [/(\d+) tunneling packets? that can't find gif$/, [[:packets, 'tunneling, gif not found']]], + [/(\d+) datagrams? with bad address in header$/, [[:packets, 'bad address in header']]] + ]) + ] +end + when 'arp' + if $os == :linux + [] + else + [ + Graph.new('packets', protocol, [ + # This is just a total, so ignore the value but keep regexp to avoid 'not parsed' warning. + [/(\d+) ARP packets? received$/], + [/(\d+) ARP requests? received$/, [[:packets, 'requests received']]], + [/(\d+) ARP repl(?:y|ies) received$/, [[:packets, 'replies received']]], + [/(\d+) ARP requests? sent$/, [[:packets, 'requests', 'requests received']]], + [/(\d+) ARP repl(?:y|ies) sent$/, [[:packets, 'replies', 'replies received']]], + [/(\d+) total packets? dropped due to no ARP entry$/, [[:packets, 'no entry']]] + ]), + + Graph.new('entries', protocol, [ + [/(\d+) ARP entrys? timed out$/, [[:entries, 'timed out']]], + [/(\d+) Duplicate IPs seen$/, [[:entries, 'duplicate IPs seen']]] + ]) + ] +end + end +end + +proto_name = File.basename($0, '.*').escape +proto_name.slice! 0, PLUGIN_NAME.size if proto_name.start_with? PLUGIN_NAME + +proto_name = 'tcp' if proto_name.empty? + +def netstat_s(protocol) + if $os == :linux + if %w[tcp udp].include?(protocol) + `netstat -s --#{protocol}` + else + `netstat -s --raw` +end + else + `netstat -sp #{protocol}` + end.lines.reject { |line| line =~ /^\w+:/ } +end + +case ARGV.first +when 'autoconf' + puts %i[linux freebsd].include?($os) ? 'yes' : 'no' +when 'suggest' + puts $os == :linux ? %w[tcp] : %w[tcp udp ip arp] +when 'config' + graphs_for(proto_name).each do |graph| + puts graph.config.join $/ + end +else + data = netstat_s(proto_name) + graphs_for(proto_name).each do |graph| + puts graph.fetch(data).join $/ + end + + if $debug_mode + warn "not parsed:\n#{data.join}" unless data.empty? 
+end +end + +# awful performance when scrolling through those regexps above +# vim: syntax=none diff --git a/plugins/network/netstat_s_/netstat_s_ b/plugins/network/netstat_s_/netstat_s_ deleted file mode 100755 index 39bbbba4..00000000 --- a/plugins/network/netstat_s_/netstat_s_ +++ /dev/null @@ -1,405 +0,0 @@ -#!/usr/bin/env ruby - -# netstat_s revision 6 (Nov 2013) -# -# This plugin shows various statistics from 'netstat -s' -# -# Required privileges: none -# -# OS: -# Supposed: BSD, Linux (only a few items, see netstat_multi for more) -# Tested: FreeBSD: 8.2, 8.3, 9.1 -# Linux : Debian 6 (kernel 2.6.32), Arch (kernel 3.11.6), CentOS 6 -# -# Author: Artem Sheremet -# - -#%# family=auto -#%# capabilities=autoconf suggest - -# original filename -PLUGIN_NAME = 'netstat_s_' - -$os = `uname -s`.strip.downcase.to_sym -$debug_mode = ARGV.first == 'debug' - -class String - def escape - self.gsub /[^\w]/, '_' - end - - unless method_defined? :start_with? - def start_with?(str) - self[0...str.size] == str - end - end - - unless method_defined? :lines - def lines - self.split($/).to_enum - end - end -end - -class Graph - def initialize(name, protocol, parse_expr) - @name, @protocol, @parse_expr = name, protocol, parse_expr - end - - def config - config_options = [] - - # first, build a list of multigraphs (one graph per unit) - # Hash key is unit, and the value is array of labels - multigraphs = {} - @parse_expr.each { |expr, descr| - next unless descr # no label - skip this entry - descr.each { |entry| - labels_array = (multigraphs[entry[0]] ||= []) - labels_array.push [entry[1], entry[2]] - } - } - - multigraphs.each_pair { |unit, labels_and_negatives| - # now just add options to the config - - config_options.concat [ - "multigraph #{name(unit)}", - "graph_title Netstat: #{@protocol}: #{@name}#{" (#{unit})" if multigraphs.size > 1}", - "graph_category network", - "graph_order #{labels_and_negatives.map { |label, _negative| label.escape }.join(' ')}" - ] - - config_options.push "graph_args --base 1024" if unit == :bytes - has_negatives = false - - labels_and_negatives.each { |label, negative| - label_esc = label.escape - has_negatives = true unless negative == nil - - if negative == true - # the value has no opposite and is negative - config_options.concat [ - "#{label_esc}.graph no", - "#{label_esc}_neg.type DERIVE", - "#{label_esc}_neg.min 0", - "#{label_esc}_neg.draw LINE", - "#{label_esc}_neg.label #{label}", - "#{label_esc}_neg.negative #{label_esc}" - ] - else - config_options.concat [ - "#{label_esc}.type DERIVE", - "#{label_esc}.min 0", - "#{label_esc}.draw LINE", - "#{label_esc}.label #{label}" - ] - end - - if negative == false - # the value has no opposite and is positive - config_options.concat [ - "#{label_esc}_neg.graph off", - "#{label_esc}.negative #{label_esc}_neg" - ] - elsif negative - negative_esc = negative.escape - config_options.concat [ - "#{label_esc}.negative #{negative_esc}", - "#{negative_esc}.graph no" - ] - end - } - - config_options.push "graph_vlabel per second#{" in (-) / out (+)" if has_negatives}" - } - - config_options - end - - def fetch(data) - output_data = [] - - # first build a set of multigraphs, one per unit. 
- # Hash key is unit, and the value is a hash of 'escaped label' => 'value' - multigraphs = {} - @parse_expr.each { |expr, descr| - next unless descr # no label - skip this entry - index = data.index { |line| line =~ expr } - if index - data.delete_at index - $~[1..-1].zip(descr).each { |value, info| - unit, label = info - (multigraphs[unit] ||= {})[label.escape] = value - } - else - warn "no line found for #{expr}, #{descr}" if $debug_mode - end - } - - multigraphs.each_pair { |unit, values| - output_data.push "multigraph #{name(unit)}" - output_data += values.map { |label, value| "#{label}.value #{value}" } - } - - output_data - end - - def name(unit) - "#{PLUGIN_NAME}#{@protocol}_#{@name.escape}_#{unit}" - end -end - -def graphs_for(protocol) - case protocol - # Order of the graps in each section is important for parsing. - # At the same time, it is not important for munin, so we are OK placing it in parsing order here. - when 'tcp' - $os == :linux ? [ - Graph.new('sent', protocol, [ - # Description of the elements of arrays below: - # 0: regexp to parse the line - # 1: Array for each matching group in the regular expression. - # 0: unit name - # 1: label - # 2 (optional): negative label - # It could be reasonable to add more elements as warning and critical values. - - [ /(\d+) segments send out$/, [ [ :segments, 'total' ] ] ], - [ /(\d+) segments retransmited$/, [ [ :segments, 'retransmitted' ] ] ] - ]), - - Graph.new('received', protocol, [ - [ /(\d+) segments received$/, [ [ :segments, 'total' ] ] ], - [ /(\d+) bad segments received.$/, [ [ :segments, 'bad' ] ] ] - ]), - - Graph.new('connections', protocol, [ - [ /(\d+) active connections openings$/, [ [ :connections, 'active openings' ] ] ], - [ /(\d+) passive connection openings$/, [ [ :connections, 'passive openings' ] ] ], - [ /(\d+) failed connection attempts$/, [ [ :connections, 'failed attempts' ] ] ], - [ /(\d+) connection resets received$/, [ [ :connections, 'RST received' ] ] ], - [ /(\d+) connections established$/, [ [ :connections, 'established' ] ] ], - [ /(\d+) resets sent$/, [ [ :connections, 'RST sent' ] ] ] - ]), - - Graph.new('timeouts', protocol, [ - [ /(\d+) timeouts after SACK recovery$/, [ [ :segments, 'after SACK recovery' ] ] ], - [ /(\d+) other TCP timeouts$/, [ [ :segments, 'other TCP' ] ] ], - [ /(\d+) timeouts in loss state$/, [ [ :segments, 'in a loss state' ] ] ] - ]) - ] : [ - Graph.new('sent', protocol, [ - [ /(\d+) packets sent$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) data packets \((\d+) bytes\)$/, [ [ :packets, 'data' ], [ :bytes, 'data' ] ] ], - [ /(\d+) data packets \((\d+) bytes\) retransmitted$/, [ [ :packets, 'retransmitted' ], [ :bytes, 'retransmitted' ] ] ], - [ /(\d+) data packets unnecessarily retransmitted$/, [ [ :packets, 'unnecessarily retransmitted' ] ] ], - [ /(\d+) resends initiated by MTU discovery$/, [ [ :packets, 'resends initiated by MTU discovery' ] ] ], - [ /(\d+) ack-only packets \((\d+) delayed\)$/, [ [ :packets, 'ack-only' ], [ :packets, 'ack-only delayed' ] ] ], - [ /(\d+) URG only packets$/, [ [ :packets, 'URG only' ] ] ], - [ /(\d+) window probe packets$/, [ [ :packets, 'window probe' ] ] ], - [ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ], - [ /(\d+) control packets$/, [ [ :packets, 'control' ] ] ] - ]), - - Graph.new('received', protocol, [ - [ /(\d+) packets received$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) acks \(for (\d+) bytes\)$/, [ [ :packets, 'acks' ], [ :bytes, 'acks' ] ] ], - [ /(\d+) duplicate acks$/, [ [ :packets, 'duplicate acks' ] 
] ], - [ /(\d+) acks for unsent data$/, [ [ :packets, 'acks for unsent data' ] ] ], - [ /(\d+) packets \((\d+) bytes\) received in-sequence$/, [ [ :packets, 'in-sequence' ], [ :bytes, 'in-sequence' ] ] ], - [ /(\d+) completely duplicate packets \((\d+) bytes\)$/, [ [ :packets, 'completely duplicate' ], [ :bytes, 'completely duplicate' ] ] ], - [ /(\d+) old duplicate packets$/, [ [ :packets, 'old duplicate' ] ] ], - [ /(\d+) packets with some dup\. data \((\d+) bytes duped\)$/, [ [ :packets, 'some dup. data' ], [ :bytes, 'partial dups' ] ] ], - [ /(\d+) out-of-order packets \((\d+) bytes\)$/, [ [ :packets, 'out-of-order' ], [ :bytes, 'out-of-order' ] ] ], - [ /(\d+) packets \((\d+) bytes\) of data after window$/, [ [ :packets, 'data after window' ], [ :bytes, 'data after window' ] ] ], - [ /(\d+) window probes$/, [ [ :packets, 'window probes' ] ] ], - [ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ], - [ /(\d+) packets received after close$/, [ [ :packets, 'after close' ] ] ], - [ /(\d+) discarded for bad checksums$/, [ [ :packets, 'bad checksums' ] ] ], - [ /(\d+) discarded for bad header offset fields?$/, [ [ :packets, 'bad header offset flds' ] ] ], - [ /(\d+) discarded because packet too short$/, [ [ :packets, 'too short' ] ] ], - [ /(\d+) discarded due to memory problems$/, [ [ :packets, 'discarded: memory problems' ] ] ], - [ /(\d+) ignored RSTs in the windows$/, [ [ :packets, 'ignored RSTs in windows' ] ] ], - [ /(\d+) segments updated rtt \(of (\d+) attempts\)$/, [ [ :packets, 'RTT: updated' ], [ :packets, 'RTT: attempts to update' ] ] ] - ]), - - Graph.new('connections', protocol, [ - [ /(\d+) connection requests$/, [ [ :connections, 'requests' ] ] ], - [ /(\d+) connection accepts$/, [ [ :connections, 'accepts' ] ] ], - [ /(\d+) bad connection attempts$/, [ [ :connections, 'bad attempts' ] ] ], - [ /(\d+) listen queue overflows$/, [ [ :connections, 'listen queue overflows' ] ] ], - [ /(\d+) connections established \(including accepts\)$/, [ [ :connections, 'established' ] ] ], - [ /(\d+) connections closed \(including (\d+) drops\)$/, [ [ :connections, 'closed' ], [ :connections, 'dropped' ] ] ], - [ /(\d+) connections updated cached RTT on close$/, [ [ :connections, 'closed & upd cached RTT' ] ] ], - [ /(\d+) connections updated cached RTT variance on close$/, [ [ :connections, 'closed & upd cached RTT variance' ] ] ], - [ /(\d+) connections updated cached ssthresh on close$/, [ [ :connections, 'closed & upd cached ssthresh' ] ] ], - [ /(\d+) embryonic connections dropped$/, [ [ :connections, 'embryonic dropped' ] ] ] - ]), - - Graph.new('timeouts', protocol, [ - [ /(\d+) retransmit timeouts$/, [ [ :connections, 'retransmit' ] ] ], - [ /(\d+) connections dropped by rexmit timeout$/, [ [ :connections, 'retransmit: dropped' ] ] ], - [ /(\d+) persist timeouts$/, [ [ :connections, 'persist' ] ] ], - [ /(\d+) connections dropped by persist timeout$/, [ [ :connections, 'persist: dropped' ] ] ], - [ /(\d+) Connections \(fin_wait_2\) dropped because of timeout$/, [ [ :connections, 'fin_wait_2: dropped' ] ] ], - [ /(\d+) keepalive timeouts$/, [ [ :connections, 'keepalive' ] ] ], - [ /(\d+) keepalive probes sent$/, [ [ :connections, 'keepalive: probes sent' ] ] ], - [ /(\d+) connections dropped by keepalive$/, [ [ :connections, 'keepalive: dropped' ] ] ] - ]), - - Graph.new('correct predictions', protocol, [ - [ /(\d+) correct ACK header predictions$/, [ [ :predictions, 'ACK header' ] ] ], - [ /(\d+) correct data packet header predictions$/, [ [ :predictions, 'data 
packet header' ] ] ] - ]), - - Graph.new('SYN', protocol, [ - [ /(\d+) syncache entries added$/, [ [ :entries, 'cache added' ] ] ], - [ /(\d+) cookies sent$/, [ [ :entries, 'cookies sent' ] ] ], - [ /(\d+) cookies received$/, [ [ :entries, 'cookies received' ] ] ], - [ /(\d+) retransmitted$/, [ [ :entries, 'retransmitted' ] ] ], - [ /(\d+) dupsyn$/, [ [ :entries, 'duplicates' ] ] ], - [ /(\d+) dropped$/, [ [ :entries, 'dropped' ] ] ], - [ /(\d+) completed$/, [ [ :entries, 'completed' ] ] ], - [ /(\d+) bucket overflow$/, [ [ :entries, 'bucket overflow' ] ] ], - [ /(\d+) cache overflow$/, [ [ :entries, 'cache overflow' ] ] ], - [ /(\d+) reset$/, [ [ :entries, 'reset' ] ] ], - [ /(\d+) stale$/, [ [ :entries, 'stale' ] ] ], - [ /(\d+) aborted$/, [ [ :entries, 'aborted' ] ] ], - [ /(\d+) badack$/, [ [ :entries, 'bad ACK' ] ] ], - [ /(\d+) unreach$/, [ [ :entries, 'unreachable' ] ] ], - [ /(\d+) zone failures$/, [ [ :entries, 'zone failures' ] ] ], - [ /(\d+) hostcache entries added$/, [ [ :entries, 'hostcache added' ] ] ], - [ /(\d+) bucket overflow$/, [ [ :entries, 'hostcache overflow' ] ] ] - ]), - - Graph.new('SACK', protocol, [ - [ /(\d+) SACK recovery episodes$/, [ [ :packets, 'recovery episodes' ] ] ], - [ /(\d+) segment rexmits in SACK recovery episodes$/, [ [ :packets, 'segment rexmits' ] ] ], - [ /(\d+) byte rexmits in SACK recovery episodes$/, [ [ :bytes, 'bytes rexmitted' ] ] ], - [ /(\d+) SACK options \(SACK blocks\) received$/, [ [ :packets, 'options blocks rcvd' ] ] ], - [ /(\d+) SACK options \(SACK blocks\) sent$/, [ [ :packets, 'options blocks sent' ] ] ], - [ /(\d+) SACK scoreboard overflow$/, [ [ :packets, 'scoreboard overflow' ] ] ] - ]), - - Graph.new('ECN', protocol, [ - [ /(\d+) packets with ECN CE bit set$/, [ [ :packets, 'CE bit' ] ] ], - [ /(\d+) packets with ECN ECT\(0\) bit set$/, [ [ :packets, 'ECT(0) bit' ] ] ], - [ /(\d+) packets with ECN ECT\(1\) bit set$/, [ [ :packets, 'ECT(1) bit' ] ] ], - [ /(\d+) successful ECN handshakes$/, [ [ :packets, 'successful handshakes' ] ] ], - [ /(\d+) times ECN reduced the congestion window$/, [ [ :packets, 'congestion window reduced' ] ] ] - ]) - ] - when 'udp' - $os == :linux ? [ - ] : [ - Graph.new('received', protocol, [ - [ /(\d+) datagrams received$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) with incomplete header$/, [ [ :packets, 'incomplete header' ] ] ], - [ /(\d+) with bad data length field$/, [ [ :packets, 'bad data length field' ] ] ], - [ /(\d+) with bad checksum$/, [ [ :packets, 'bad checksum' ] ] ], - [ /(\d+) with no checksum$/, [ [ :packets, 'no checksum' ] ] ], - [ /(\d+) dropped due to no socket$/, [ [ :packets, 'dropped: no socket' ] ] ], - [ /(\d+) broadcast\/multicast datagrams undelivered$/, [ [ :packets, '*cast undelivered' ] ] ], - [ /(\d+) dropped due to full socket buffers$/, [ [ :packets, 'dropped: no buffers' ] ] ], - [ /(\d+) not for hashed pcb$/, [ [ :packets, 'not for hashed pcb' ] ] ], - [ /(\d+) delivered$/, [ [ :packets, 'delivered' ] ] ] - ]), - - Graph.new('sent', protocol, [ - [ /(\d+) datagrams output$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) times multicast source filter matched$/, [ [ :packets, 'multicast src filter match' ] ] ] - ]) - ] - when 'ip' - $os == :linux ? 
[ - ] : [ - Graph.new('received', protocol, [ - [ /(\d+) total packets received$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) bad header checksums$/, [ [ :packets, 'bad header checksum' ] ] ], - [ /(\d+) with size smaller than minimum$/, [ [ :packets, 'size smaller than min' ] ] ], - [ /(\d+) with data size < data length$/, [ [ :packets, 'data size < data length' ] ] ], - [ /(\d+) with ip length > max ip packet size$/, [ [ :packets, 'ip length > max ip packet sz' ] ] ], - [ /(\d+) with header length < data size$/, [ [ :packets, 'header length < data size' ] ] ], - [ /(\d+) with data length < header length$/, [ [ :packets, 'data length < header length' ] ] ], - [ /(\d+) with bad options$/, [ [ :packets, 'bad options' ] ] ], - [ /(\d+) with incorrect version number$/, [ [ :packets, 'incorrect version' ] ] ], - [ /(\d+) fragments? received$/, [ [ :packets, 'fragments' ] ] ], - [ /(\d+) fragments? dropped \(dup or out of space\)$/, [ [ :packets, 'frags dropped: dup/out of spc' ] ] ], - [ /(\d+) fragments? dropped after timeout$/, [ [ :packets, 'frags dropped: timeout' ] ] ], - [ /(\d+) packets? reassembled ok$/, [ [ :packets, 'reassembled ok' ] ] ], - [ /(\d+) packets? for this host$/, [ [ :packets, 'for this host' ] ] ], - [ /(\d+) packets? for unknown\/unsupported protocol$/, [ [ :packets, 'for unknown/unsup protocol' ] ] ], - [ /(\d+) packets? forwarded \((\d+) packets fast forwarded\)$/, [ [ :packets, 'forwarded' ], [ :packets, 'fast forwarded' ] ] ], - [ /(\d+) packets? not forwardable$/, [ [ :packets, 'not forwardable' ] ] ], - [ /(\d+) packets? received for unknown multicast group$/, [ [ :packets, 'unknown multicast grp' ] ] ] - ]), - - Graph.new('sent', protocol, [ - [ /(\d+) packets? sent from this host$/, [ [ :packets, 'total' ] ] ], - [ /(\d+) redirects? sent$/, [ [ :packets, 'redirect' ] ] ], - [ /(\d+) packets? sent with fabricated ip header$/, [ [ :packets, 'fabricated IP head' ] ] ], - [ /(\d+) output packets? dropped due to no bufs, etc\.$/, [ [ :packets, 'dropped: no bufs, etc' ] ] ], - [ /(\d+) output packets? discarded due to no route$/, [ [ :packets, 'discarded: no route' ] ] ], - [ /(\d+) output datagrams? fragmented$/, [ [ :packets, 'fragmented' ] ] ], - [ /(\d+) fragments? created$/, [ [ :packets, 'fragments created' ] ] ], - [ /(\d+) datagrams? that can't be fragmented$/, [ [ :packets, "can't be fragmented" ] ] ], - [ /(\d+) tunneling packets? that can't find gif$/, [ [ :packets, 'tunneling, gif not found' ] ] ], - [ /(\d+) datagrams? with bad address in header$/, [ [ :packets, 'bad address in header' ] ] ] - ]) - ] - when 'arp' - $os == :linux ? [] : [ - Graph.new('packets', protocol, [ - # This is just a total, so ignore the value but keep regexp to avoid 'not parsed' warning. - [ /(\d+) ARP packets? received$/ ], - [ /(\d+) ARP requests? received$/, [ [ :packets, 'requests received' ] ] ], - [ /(\d+) ARP repl(?:y|ies) received$/, [ [ :packets, 'replies received' ] ] ], - [ /(\d+) ARP requests? sent$/, [ [ :packets, 'requests', 'requests received' ] ] ], - [ /(\d+) ARP repl(?:y|ies) sent$/, [ [ :packets, 'replies', 'replies received' ] ] ], - [ /(\d+) total packets? dropped due to no ARP entry$/, [ [ :packets, 'no entry' ] ] ] - ]), - - Graph.new('entries', protocol, [ - [ /(\d+) ARP entrys? timed out$/, [ [ :entries, 'timed out' ] ] ], - [ /(\d+) Duplicate IPs seen$/, [ [ :entries, 'duplicate IPs seen' ] ] ] - ]) - ] - end -end - -proto_name = File.basename($0, '.*').escape -proto_name.slice! 0, PLUGIN_NAME.size if proto_name.start_with? 
PLUGIN_NAME - -proto_name = 'tcp' if proto_name.empty? - -def netstat_s(protocol) - if $os == :linux - %w(tcp udp).include?(protocol) ? - `netstat -s --#{protocol}` : - `netstat -s --raw` - else - `netstat -sp #{protocol}` - end.lines.reject { |line| line =~ /^\w+:/ } -end - -case ARGV.first -when 'autoconf' - puts [:linux, :freebsd].include?($os) ? 'yes' : 'no' -when 'suggest' - puts $os == :linux ? %w(tcp) : %w(tcp udp ip arp) -when 'config' - graphs_for(proto_name).each { |graph| - puts graph.config.join $/ - } -else - data = netstat_s(proto_name) - graphs_for(proto_name).each { |graph| - puts graph.fetch(data).join $/ - } - - warn "not parsed:\n#{data.join}" unless data.empty? if $debug_mode -end - -# awful performance when scrolling through those regexps above -# vim: syntax=none diff --git a/plugins/network/nsd3 b/plugins/network/nsd3 index f2a4cddf..bddb3931 100755 --- a/plugins/network/nsd3 +++ b/plugins/network/nsd3 @@ -11,21 +11,21 @@ =head1 APPLICABLE SYSTEMS - Linux or *nix system with a logging installtion of NSD v3 installed. + Linux or *nix system with a logging installation of NSD v3 installed. (http://nlnetlabs.nl/projects/nsd/) =head1 CONFIGURATION - + The plugin needs access to the nsd logfile and the nsd pid file to force the running nsd process to write the current statistics. - Tip: To see if it's already set up correctly, just run this plugin + Tip: To see if it's already set up correctly, just run this plugin with the parameter "autoconf". If you get a "yes", everything should work like a charm already. This configuration section shows the defaults of the plugin: - The stats line is a set of space-separated values that you wish to + The stats line is a set of space-separated values that you wish to retrieve from NSD. The format is VALUE=Caption. For spaces in a caption value, replace them with an underscore (_). @@ -34,7 +34,7 @@ env.pidfile /var/run/nsd3/nsd.pid env.stats "A=A AAAA=AAAA MX=MX PTR=PTR TYPE252=AXFR SNXD=NXDOMAIN RQ=Total_Successful" - If you need to set a user for the logfile to be readable, and most + If you need to set a user for the logfile to be readable, and most importantly, the process to receive the signal, you may specify it. For example: @@ -42,29 +42,29 @@ user nsd =head1 INTERPRETATION - - The plugin shows the number of queries that nsd has received, + + The plugin shows the number of queries that nsd has received, averaged over a period to gain the number of queries per second. For most servers, these values will be very low. In the event of a misconfiguration, the plugin will return undefined values. =head1 MAGIC MARKERS - + #%# family=auto #%# capabilities=autoconf =head1 VERSION v1.0.1 - + =head1 AUTHOR J.T.Sage - + =head1 LICENSE - + GPLv2 - + =cut """ diff --git a/plugins/network/olsrd b/plugins/network/olsrd index 5fcfedba..e74568d0 100755 --- a/plugins/network/olsrd +++ b/plugins/network/olsrd @@ -1,62 +1,88 @@ #!/bin/sh # weird shebang? 
See below: "interpreter selection" -# -# Collect basic information about the neighbours of an OLSR node: -# * link quality -# * neighbour link quality -# * number of nodes reachable behind each neighbour -# * ping times of direct neighbours -# -# This plugin works with the following python interpreters: -# * Python 2 -# * Python 3 -# * micropython -# -# Environment variables: -# * OLSRD_HOST: name or IP of the host running the txtinfo plugin (default: localhost) -# * OLSRD_TXTINFO_PORT: the port that the txtinfo plugin is listening to (default: 2006) -# * OLSRD_BIN_PATH: name of the olsrd binary (only used for 'autoconf', defaults to /usr/sbin/olsrd) -# * MICROPYTHON_HEAP: adjust this parameter for micropython if your olsr network contains -# more than a few thousand nodes (default: 512k) -# -# -# Copyright (C) 2015 Lars Kruse -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# Magic markers -#%# capabilities=autoconf -#%# family=auto """true" +: <<=cut + +=head1 NAME + +olsrd - Monitor the state of an OLSR-based routing network + + +=head1 APPLICABLE SYSTEMS + +Information is parsed from the output of "txtinfo" plugin for olsrd. + + +=head1 CONFIGURATION + +Environment variables: + + * OLSRD_HOST: name or IP of the host running the txtinfo plugin (default: localhost) + * OLSRD_TXTINFO_PORT: the port that the txtinfo plugin is listening to (default: 2006) + * OLSRD_BIN_PATH: name of the olsrd binary (only used for 'autoconf', default: /usr/sbin/olsrd) + * MICROPYTHON_HEAP: adjust this parameter for micropython if your olsr network contains + more than a few thousand nodes (default: 512k) + +=head1 USAGE + +Collect basic information about the neighbours of an OLSR node: + + * link quality + * neighbour link quality + * number of nodes reachable behind each neighbour + * ping times of direct neighbours + +This plugin works with the following python interpreters: + + * Python 2 + * Python 3 + * micropython (e.g. OpenWrt) + + +=head1 VERSION + + 0.4 + + +=head1 AUTHOR + +Lars Kruse + + +=head1 LICENSE + +GPLv3 or above + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + + # ****************** Interpreter Selection *************** # This unbelievable dirty hack allows to find a suitable python interpreter. # This is specifically useful for OpenWRT where typically only micropython is available. # # Additionally we need to run micropython with additional startup options. -# This is necessary due to our demand for more than 128k heap (this default is sufficient for only 400 olsr nodes). +# This is necessary due to our demand for more than 128k heap (this default is sufficient for only +# 400 olsr nodes). # # This "execution hack" works as follows: # * the script is executed by busybox ash or another shell -# * the above line (three quotes before and one quote after 'true') evaluates differently for shell and python: -# * shell: run "true" (i.e. 
nothing happens) -# * python: ignore everything up to the next three consecutive quotes +# * the above line (three quotes before and one quote after 'true') evaluates differently for +# shell and python: +# * shell: run "true" (i.e. nothing happens) +# * python: ignore everything up to the next three consecutive quotes # Thus we may place shell code here that will take care for selecting an interpreter. # prefer micropython if it is available - otherwise fall back to any python (2 or 3) -if which micropython >/dev/null; then - /usr/bin/micropython -X "heapsize=${MICROPYTHON_HEAP:-512k}" "$0" "$@" +MICROPYTHON_BIN=$(which micropython || true) +if [ -n "$MICROPYTHON_BIN" ]; then + "$MICROPYTHON_BIN" -X "heapsize=${MICROPYTHON_HEAP:-512k}" "$0" "$@" else python "$0" "$@" fi @@ -68,19 +94,20 @@ true <= 2: + line = line.strip() + if line: + yield line + in_body_count += 1 fconn.close() conn.close() @@ -159,15 +201,17 @@ def get_address_device_mapping(): for line in query_olsrd_txtservice("mid"): # example line content: # 192.168.2.171 192.168.22.171;192.168.12.171 - device_id, mids = line.split() - for mid in mids.split(";"): + # since olsr v0.9.5: + # 192.168.2.171 192.168.22.171 192.168.12.171 + device_id, mids = line.split(None, 1) + for mid in mids.replace(";", " ").split(): mapping[mid] = device_id return mapping def count_routes_by_neighbour(address_mapping, ignore_list): node_count = {} - for line in query_olsrd_txtservice("routes"): + for line in query_olsrd_txtservice("rou"): # example line content: # 192.168.1.79/32 192.168.12.38 4 4.008 wlan0 tokens = line.split() @@ -192,8 +236,12 @@ def get_olsr_links(): hna_list = [line.split()[0] for line in query_olsrd_txtservice("hna")] route_count = count_routes_by_neighbour(mid_mapping, hna_list) result = [] - for line in query_olsrd_txtservice("links"): + for line in query_olsrd_txtservice("lin"): tokens = line.split() + # the "cost" may be infinite + if tokens[-1] == "INFINITE": + # "inf" is the python keyword for "maximum float number" + tokens[-1] = "inf" link = {} link["local"] = tokens.pop(0) remote = tokens.pop(0) @@ -217,9 +265,11 @@ def _read_file(filename): def get_ping_times(hosts): tempfile = "/tmp/munin-olsrd-{pid}.tmp".format(pid=os.getpid()) - command = 'for host in {hosts}; do echo -n "$host "; ping -c 1 -w 1 "$host" | grep /avg/ || true; done >{tempfile}'\ - .format(hosts=" ".join(hosts), tempfile=tempfile) - # micropython supports only "os.system" (as of 2015) - thus we need to stick with it for openwrt + command = ('for host in {hosts}; do echo -n "$host "; ' + 'ping -c 1 -w 1 "$host" | grep /avg/ || echo; done >{tempfile}' + .format(hosts=" ".join(hosts), tempfile=tempfile)) + # micropython supports only "os.system" (as of 2015) - thus we need to stick with it for + # OpenWrt. 
returncode = os.system(command) if returncode != 0: return {} @@ -237,71 +287,48 @@ def get_ping_times(hosts): return result -if __name__ == "__main__": - # parse arguments - if len(sys.argv) > 1: - if sys.argv[1]=="config": - links = list(get_olsr_links()) +def do_config(): + links = list(get_olsr_links()) - # link quality with regard to neighbours - print("multigraph olsr_link_quality") - print(LQ_GRAPH_CONFIG.format(title="OLSR Link Quality")) - is_first = True - for link in links: - print(LQ_VALUES_CONFIG.format(label=link["remote"], - suffix="_{host}".format(host=get_clean_fieldname(link["remote"])), - draw_type=("AREA" if is_first else "STACK"))) - is_first = False - is_first = True - for link in links: - print("multigraph olsr_link_quality.host_{remote}".format(remote=get_clean_fieldname(link["remote"]))) - print(LQ_GRAPH_CONFIG.format(title="Link Quality towards {host}".format(host=link["remote"]))) - print(LQ_VALUES_CONFIG.format(label="Link Quality", suffix="", draw_type="AREA")) - is_first = False + # link quality with regard to neighbours + print("multigraph olsr_link_quality") + print(LQ_GRAPH_CONFIG.format(title="OLSR Link Quality")) + for link in links: + print(LQ_VALUES_CONFIG.format( + label=link["remote"], + suffix="_{host}".format(host=get_clean_fieldname(link["remote"])), + draw_type="AREASTACK")) + for link in links: + print("multigraph olsr_link_quality.host_{remote}" + .format(remote=get_clean_fieldname(link["remote"]))) + title = "Link Quality towards {host}".format(host=link["remote"]) + print(LQ_GRAPH_CONFIG.format(title=title)) + print(LQ_VALUES_CONFIG.format(label="Link Quality", suffix="", draw_type="AREA")) - # link count ("number of nodes behind each neighbour") - print("multigraph olsr_neighbour_link_count") - print(NEIGHBOUR_COUNT_CONFIG) - is_first = True - for link in links: - print(NEIGHBOUR_COUNT_VALUE - .format(host=link["remote"], - host_fieldname=get_clean_fieldname(link["remote"]), - draw_type=("AREA" if is_first else "STACK"))) - is_first = False + # link count ("number of nodes behind each neighbour") + print("multigraph olsr_neighbour_link_count") + print(NEIGHBOUR_COUNT_CONFIG) + for link in links: + print(NEIGHBOUR_COUNT_VALUE + .format(host=link["remote"], host_fieldname=get_clean_fieldname(link["remote"]), + draw_type="AREASTACK")) - # neighbour ping - print("multigraph olsr_neighbour_ping") - print(NEIGHBOUR_PING_CONFIG.format(title="Ping time of neighbours")) - for link in links: - print(NEIGHBOUR_PING_VALUE - .format(host=link["remote"], - host_fieldname=get_clean_fieldname(link["remote"]))) - # neighbour pings - single subgraphs - for link in links: - remote = get_clean_fieldname(link["remote"]) - print("multigraph olsr_neighbour_ping.host_{remote}".format(remote=remote)) - print(NEIGHBOUR_PING_CONFIG.format(title="Ping time of {remote}".format(remote=remote))) - print(NEIGHBOUR_PING_VALUE.format(host=link["remote"], host_fieldname=remote)) + # neighbour ping + print("multigraph olsr_neighbour_ping") + print(NEIGHBOUR_PING_CONFIG.format(title="Ping time of neighbours")) + for link in links: + print(NEIGHBOUR_PING_VALUE + .format(host=link["remote"], host_fieldname=get_clean_fieldname(link["remote"]))) + # neighbour pings - single subgraphs + for link in links: + remote = get_clean_fieldname(link["remote"]) + print("multigraph olsr_neighbour_ping.host_{remote}".format(remote=remote)) + title = "Ping time of {remote}".format(remote=remote) + print(NEIGHBOUR_PING_CONFIG.format(title=title)) + 
print(NEIGHBOUR_PING_VALUE.format(host=link["remote"], host_fieldname=remote)) - sys.exit(0) - elif sys.argv[1] == "autoconf": - if os.path.exists(os.getenv('OLSRD_BIN_PATH', '/usr/sbin/olsrd')): - print('yes') - else: - print('no') - sys.exit(0) - elif sys.argv[1] == "version": - print('olsrd Munin plugin, version %s' % plugin_version) - sys.exit(0) - elif sys.argv[1] == "": - # ignore - pass - else: - # unknown argument - sys.stderr.write("Unknown argument{eol}".format(eol=LINESEP)) - sys.exit(1) +def do_fetch(): # output values links = list(get_olsr_links()) @@ -331,17 +358,44 @@ if __name__ == "__main__": ping_times = get_ping_times([link["remote"] for link in links]) for link in links: ping_time = ping_times.get(link["remote"], None) - if ping_time is not None: - print("neighbour_{remote}.value {value:.4f}" - .format(value=ping_time, - remote=get_clean_fieldname(link["remote"]))) + value = "{:.4f}".format(ping_time) if ping_time is not None else "U" + print("neighbour_{remote}.value {value}" + .format(value=value, remote=get_clean_fieldname(link["remote"]))) # single detailed graphs for the ping time of each link for link in links: ping_time = ping_times.get(link["remote"], None) - if ping_time is not None: - remote = get_clean_fieldname(link["remote"]) - print("multigraph olsr_neighbour_ping.host_{remote}".format(remote=remote)) - print("neighbour_{remote}.value {value:.4f}".format(remote=remote, value=ping_time)) + value = "{:.4f}".format(ping_time) if ping_time is not None else "U" + remote = get_clean_fieldname(link["remote"]) + print("multigraph olsr_neighbour_ping.host_{remote}".format(remote=remote)) + print("neighbour_{remote}.value {value}".format(remote=remote, value=value)) + + +if __name__ == "__main__": + # parse arguments + if len(sys.argv) > 1: + if sys.argv[1] == "config": + do_config() + if os.getenv("MUNIN_CAP_DIRTYCONFIG") == "1": + do_fetch() + sys.exit(0) + elif sys.argv[1] == "autoconf": + if os.path.exists(os.getenv('OLSRD_BIN_PATH', '/usr/sbin/olsrd')): + print('yes') + else: + print('no') + sys.exit(0) + elif sys.argv[1] == "version": + print('olsrd Munin plugin, version %s' % plugin_version) + sys.exit(0) + elif sys.argv[1] == "": + # ignore + pass + else: + # unknown argument + sys.stderr.write("Unknown argument{eol}".format(eol=LINESEP)) + sys.exit(1) + + do_fetch() # final marker for shell / python hybrid script (see "Interpreter Selection") EOF = True diff --git a/plugins/network/proc_netstat b/plugins/network/proc_netstat index 3562201e..86c7f3e2 100755 --- a/plugins/network/proc_netstat +++ b/plugins/network/proc_netstat @@ -32,11 +32,10 @@ STRIP_OUTPUT="/\($TO_REMOVE\)/d" if [ "$1" = "autoconf" ]; then if [ -r $NETSTATS ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/network/qos_ b/plugins/network/qos_ index 06592904..99743cbc 100755 --- a/plugins/network/qos_ +++ b/plugins/network/qos_ @@ -46,9 +46,9 @@ if ( exists $ARGV[0] and $ARGV[0] eq 'autoconf' ) { } else { print "no (program $TC died)\n"; } - exit 1; + } else { + print "yes\n"; } - print "yes\n"; exit 0; } @@ -115,9 +115,9 @@ if ( exists $ARGV[0] and $ARGV[0] eq 'config' ) { last; } } - if($haschild == 1) { + if($haschild == 1) { $queues{$key}->{leaf} = 0; - next; + next; } $queues{$key}->{leaf} = 1; print $queues{$key}->{queue},$queues{$key}->{handle}, " "; diff --git a/plugins/network/quagga_routes b/plugins/network/quagga_routes index 51b87561..da586205 100755 --- a/plugins/network/quagga_routes +++ 
b/plugins/network/quagga_routes @@ -4,7 +4,7 @@ if [ "$1" = "config" ]; then echo 'graph_title quagga routes' echo 'graph_args -l 0' echo 'graph_vlabel routes' - echo 'graph_category System' + echo 'graph_category network' echo 'graph_scale no' /usr/local/bin/vtysh -e "sh ip ro sum" | awk ' /connected/ {print $1 ".label " $1 "\n" $1 ".draw AREASTACK"} diff --git a/plugins/network/radio b/plugins/network/radio index 4534506e..460f4b49 100755 --- a/plugins/network/radio +++ b/plugins/network/radio @@ -14,7 +14,7 @@ v0.2 - Updated to match output of current versions of ice- and shoutcast. */ // -------------- CONFIGURATION START --------------------------------------- - $cfg = array( + $cfg = array( // SERVER #1 array( "name" => "IceCast", // name for munin "type" => "ice", // server-type (ice/shout) diff --git a/plugins/network/shorewall-accounting_ b/plugins/network/shorewall-accounting_ index 9f6f9f21..4bb27d38 100755 --- a/plugins/network/shorewall-accounting_ +++ b/plugins/network/shorewall-accounting_ @@ -6,7 +6,7 @@ # Basically this plugin examines the output of "shorewall -x show accounting". # See http://atlee.ca/blog/2006/01/20/munin-shorewall/ for a description of # the original script by Chris AtLee. -# +# # Copyright 2010-2012 Lars Kruse # Copyright 2006 Chris AtLee # diff --git a/plugins/network/shorewall_ b/plugins/network/shorewall_ index a48f8007..b32bb7a0 100755 --- a/plugins/network/shorewall_ +++ b/plugins/network/shorewall_ @@ -1,11 +1,11 @@ #!/usr/bin/python # shorewall_ v2.0 - 30 Aug 2008 - Tanguy Pruvot -# +# # A munin plugin for tracking traffic as recorded by shorewall accounting rules -# +# # ex: ln -s /usr/share/munin/plugins/shorewall_ /etc/munin/plugins/shorewall_ftp # will log ftp* rules like ftp, ftp_input, ftp_output etc... 
-# +# # Basic Concept by Chris AtLee Released under the GPL v2 import sys, commands, re @@ -61,7 +61,7 @@ def getBytesByChain(): for name in chainNames: retval.append((name, chains[name])) return retval - + if len(sys.argv) > 1: if sys.argv[1] == "autoconf": print "yes" @@ -76,6 +76,6 @@ if len(sys.argv) > 1: print "%s.label %s" % (chain, chain) print "%s.cdef %s,8,*" % (chain, chain) sys.exit(0) - + for chain, bytes in getBytesByChain(): print "%s.value %i" % (chain, bytes) \ No newline at end of file diff --git a/plugins/network/shorewall_acc b/plugins/network/shorewall_acc index 2e60cd2d..5a5fc479 100755 --- a/plugins/network/shorewall_acc +++ b/plugins/network/shorewall_acc @@ -6,7 +6,7 @@ # Released under the GPL v2 import sys, commands, re accountingLineExp = re.compile(r"^\s*\d+[KMG]*\s+(\d+)([KMGT]*)\s+(\w+).*$") - + def getBytesByChain(): trafficCmd = "shorewall" status, output = commands.getstatusoutput("/sbin/shorewall show accounting 2>/dev/null") @@ -41,7 +41,7 @@ def getBytesByChain(): for name in chainNames: retval.append((name, chains[name])) return retval - + if len(sys.argv) > 1: if sys.argv[1] == "autoconf": print "yes" @@ -56,6 +56,6 @@ if len(sys.argv) > 1: print "%s.label %s" % (chain, chain) print "%s.cdef %s,8,*" % (chain, chain) sys.exit(0) - + for chain, bytes in getBytesByChain(): print "%s.value %i" % (chain, bytes) diff --git a/plugins/network/smtp_hello_ b/plugins/network/smtp_hello_ index 5c293beb..b8300d03 100755 --- a/plugins/network/smtp_hello_ +++ b/plugins/network/smtp_hello_ @@ -43,11 +43,10 @@ elif [ "$1" == "autoconf" ]; then if [ -x /usr/bin/time ] && [ -x /usr/bin/nc ]; then echo "yes" - exit 0 else echo "no (/usr/bin/time or /usr/bin/nc missing)" - exit 1 fi + exit 0 else diff --git a/plugins/network/sockstat-via-procfs b/plugins/network/sockstat-via-procfs index 4c875aa2..8311eaa7 100755 --- a/plugins/network/sockstat-via-procfs +++ b/plugins/network/sockstat-via-procfs @@ -20,20 +20,20 @@ fields = ( if len(sys.argv) > 1: arg = sys.argv[1] if arg == 'autoconfig': - print 'yes' + print('yes') sys.exit(0) if arg == 'config': - print 'graph_title sockstat' - print 'graph_category network' + print('graph_title sockstat') + print('graph_category network') for name, label in fields: - print '%s.label %s' % (name, label) + print('%s.label %s' % (name, label)) sys.exit(0) -re_num = re.compile('(\d+)') +re_num = re.compile(r'(\d+)') sockstat = open('/proc/net/sockstat').read() numbers = re_num.findall(sockstat) for i, (name, label) in enumerate(fields): - print '%s.value %s' % (name, numbers[i]) + print('%s.value %s' % (name, numbers[i])) sys.exit(0) diff --git a/plugins/network/speedtest-download-bandwidth b/plugins/network/speedtest-download-bandwidth index 7f930b1c..545ece40 100755 --- a/plugins/network/speedtest-download-bandwidth +++ b/plugins/network/speedtest-download-bandwidth @@ -21,8 +21,8 @@ $labels = array( "free" => "test-debit.free.fr" ); -//Connexion Mbits (30/100) -$connexion = 35; +//Connection Mbits (30/100) +$connection = 35; // CONFIG ------------------------------------------------------------------ if ($argc > 1 && $argv[1]=='config'){ @@ -33,10 +33,10 @@ graph_args --base 1024 graph_vlabel DL (MB/s) grenouille.label Grenouille (NC) grenouille.type GAUGE -maximum.label Connexion (max) +maximum.label Connection (max) maximum.type GAUGE maximum.colour ff0000 -maximum.max ".$connexion."00000 +maximum.max ".$connection."00000 "; $order=""; @@ -76,7 +76,7 @@ foreach ($mire as $label => $url) { $output .= "$label.value 
".round($cache[$label])."\n"; } -$output .= "maximum.value ".round($connexion * 1024 * 1024 / 10)."\n"; +$output .= "maximum.value ".round($connection * 1024 * 1024 / 10)."\n"; echo $output; // SPEED TEST -------------------------------------------------------------- diff --git a/plugins/network/tc_ b/plugins/network/tc_ index 9933edd2..1b774e46 100755 --- a/plugins/network/tc_ +++ b/plugins/network/tc_ @@ -50,18 +50,19 @@ mytc() { case "$1" in autoconf) - if [ -r /proc/net/dev ]; then - echo yes - exit 0 - else + if [ ! -r /proc/net/dev ]; then echo "no (/proc/net/dev not found)" - exit 1 + elif [ ! -x /sbin/tc ]; then + echo "no (missing 'tc' executable)" + else + echo yes fi + exit 0 ;; suggest) if [ -r /proc/net/dev ]; then awk ' - /^ *(eth|tap|bond|wlan|ath|ra|sw)[0-9]+:/ { + /^ *(eth|tap|bond|wlan|ath|ra|sw|eno|ens|enp|wlp|wl)[0-9]*/ { split($0, a, /: */); gsub(/^ +/,"",a[1]); if (($2 > 0) || ($10 > 0)) print a[1]; }' /proc/net/dev diff --git a/plugins/network/tc_drops_ b/plugins/network/tc_drops_ index f719291b..ab049c64 100755 --- a/plugins/network/tc_drops_ +++ b/plugins/network/tc_drops_ @@ -21,35 +21,34 @@ mytc() { case $1 in autoconf) - if [ -r /proc/net/dev ]; then - echo yes - exit 0 - else + if [ ! -r /proc/net/dev ]; then echo "no (/proc/net/dev not found)" - exit 1 + elif ! which tc >/dev/null; then + echo "no (missing 'tc' executable)" + else + echo yes fi + exit 0 ;; suggest) if [ -r /proc/net/dev ]; then awk ' - /^ *(eth|tap|bond|wlan|ath|ra|sw)[0-9]+:/ { + /^ *(eth|tap|bond|wlan|ath|ra|sw|eno|ens|enp|wlp|wl)[0-9]*/ { split($0, a, /: */); gsub(/^ +/,"",a[1]); if (($2 > 0) || ($10 > 0)) print a[1]; }' /proc/net/dev - -# egrep '^ *(eth|tap|bond|wlan|ath|ra|sw)[0-9]+:' /proc/net/dev | cut -f1 -d: | sed 's/ //g' fi exit 0 ;; config) - + echo "graph_order `mytc $DEVICE | awk '{ print $2 "_" $3 "_drops" }' | tr "\n" " "`" echo "graph_title $DEVICE TC traffic drops" echo 'graph_args --base 1000' echo 'graph_vlabel drops per ${graph_period}' echo 'graph_category network' echo "graph_info This graph shows the TC classes traffic drops of the $DEVICE network interface, epxressed in packets." - + # mytc $DEVICE | tr "_" " " | awk '{ print $2 "_" $3 "_" $4 "_drops.label " $2 "/" $3 ":" $4 "\n" $2 "_" $3 "_" $4 "_drops.type COUNTER\n" $2 "_" $3 "_" $4 "_drops.min 0\n" $2 "_" $3 "_" $4 "_drops.cdef " $2 "_" $3 "_" $4 ",8,*" }' mytc $DEVICE | tr "_" " " | awk '{ print $2 "_" $3 "_" $4 "_drops.label " $2 "/" $3 ":" $4 "\n" $2 "_" $3 "_" $4 "_drops.type DERIVE\n" $2 "_" $3 "_" $4 "_drops.min 0" }' exit 0 diff --git a/plugins/network/tc_packets_ b/plugins/network/tc_packets_ index 8ae58204..0a280c2e 100755 --- a/plugins/network/tc_packets_ +++ b/plugins/network/tc_packets_ @@ -21,35 +21,34 @@ mytc() { case $1 in autoconf) - if [ -r /proc/net/dev ]; then - echo yes - exit 0 - else + if [ ! -r /proc/net/dev ]; then echo "no (/proc/net/dev not found)" - exit 1 + elif ! 
which tc >/dev/null; then + echo "no (missing 'tc' executable)" + else + echo yes fi + exit 0 ;; suggest) if [ -r /proc/net/dev ]; then awk ' - /^ *(eth|tap|bond|wlan|ath|ra|sw)[0-9]+:/ { + /^ *(eth|tap|bond|wlan|ath|ra|sw|eno|ens|enp|wlp|wl)[0-9]*/ { split($0, a, /: */); gsub(/^ +/,"",a[1]); if (($2 > 0) || ($10 > 0)) print a[1]; }' /proc/net/dev - -# egrep '^ *(eth|tap|bond|wlan|ath|ra|sw)[0-9]+:' /proc/net/dev | cut -f1 -d: | sed 's/ //g' fi exit 0 ;; config) - + echo "graph_order `mytc $DEVICE | awk '{ print $2 "_" $3 "_packets" }' | tr "\n" " "`" echo "graph_title $DEVICE TC traffic packets" echo 'graph_args --base 1000' echo 'graph_vlabel packets per ${graph_period}' echo 'graph_category network' echo "graph_info This graph shows the TC classes traffic packets of the $DEVICE network interface." - + # mytc $DEVICE | tr "_" " " | awk '{ print $2 "_" $3 "_" $4 "_packets.label " $2 "/" $3 ":" $4 "\n" $2 "_" $3 "_" $4 "_packets.type COUNTER\n" $2 "_" $3 "_" $4 "_packets.min 0\n" $2 "_" $3 "_" $4 "_packets.cdef " $2 "_" $3 "_" $4 ",8,*" }' mytc $DEVICE | tr "_" " " | awk '{ print $2 "_" $3 "_" $4 "_packets.label " $2 "/" $3 ":" $4 "\n" $2 "_" $3 "_" $4 "_packets.type DERIVE\n" $2 "_" $3 "_" $4 "_packets.min 0" }' exit 0 diff --git a/plugins/network/traffic_ipt b/plugins/network/traffic_ipt index a2a994e4..29f899f5 100755 --- a/plugins/network/traffic_ipt +++ b/plugins/network/traffic_ipt @@ -110,7 +110,7 @@ ipv6=0 diffv4=0 diffv6=0 -IPv4_bytes=$(iptables -L -n -v -x | egrep '^\W*[0-9]+\W+[0-9]+\W+all\W+--\W+\*\W+\*\W+0.0.0.0/0\W+0.0.0.0/0\W*$' | while read pkts bytes rest; do echo $bytes; done) +IPv4_bytes=$(iptables -L -n -v -x -w | egrep '^\W*[0-9]+\W+[0-9]+\W+all\W+--\W+\*\W+\*\W+0.0.0.0/0\W+0.0.0.0/0\W*$' | while read pkts bytes rest; do echo $bytes; done) if [ -z "$IPv4_bytes" ]; then echo "W: Unable to read rule from iptables, please add rules" >&2 @@ -118,7 +118,7 @@ else ipv4=$(echo $IPv4_bytes | sed -e 's/ / + /' | bc -l) fi -IPv6_bytes=$(ip6tables -L -n -v -x | egrep '^\W*[0-9]+\W+[0-9]+\W+all\W+\*\W+\*\W+::/0\W+::/0\W*$' | while read pkts bytes rest; do echo $bytes; done) +IPv6_bytes=$(ip6tables -L -n -v -x -w | egrep '^\W*[0-9]+\W+[0-9]+\W+all\W+\*\W+\*\W+::/0\W+::/0\W*$' | while read pkts bytes rest; do echo $bytes; done) if [ -z "$IPv6_bytes" ]; then echo "W: Unable to read rule from ip6tables, please add rules" >&2 diff --git a/plugins/network/transmission b/plugins/network/transmission index 8d95b1de..e2d252f1 100755 --- a/plugins/network/transmission +++ b/plugins/network/transmission @@ -13,7 +13,7 @@ This plugin implements the multigraph protocol and provides the following graphs transmission_throughput - monitor traffic volumes of Transmission torrents transmission_activity - plugin to monitor traffic speed of Transmission torrents -This plugin requires python and the transmissionrpc python module. +This plugin requires python and the transmissionrpc python module. 
See http://pypi.python.org/pypi/transmissionrpc/ =head1 CONFIGURATION @@ -64,66 +64,64 @@ find this plugin on github at http://github.com/VolatileMesh/munin-plugins =head1 CHANGELOG =head2 1.0 - 2010/11/12 - + first release =head2 1.1 - 2011/05/29 - + fix transmission error handling - + =cut """ __version__ = '1.1' -import os, sys +import os +import sys from string import Template -plugin_name=list(os.path.split(sys.argv[0]))[1] -host = os.getenv('host','localhost') -port = os.getenv('port',9091) +plugin_name = list(os.path.split(sys.argv[0]))[1] +host = os.getenv('host', 'localhost') +port = os.getenv('port', 9091) user = os.getenv('user') passwd = os.getenv('pass') +title_host = '' if host in ['localhost', '127.0.0.1', '::1'] else ' on ' + host def config(): conf = Template("""multigraph ${plugin_name}_throughput -graph_title Transmission throughput for ${host} +graph_title Transmission throughput${title_host} graph_vlabel bytes/${graph_period} in (-) / out (+) graph_args --base 1000 graph_category network -graph_info This graph shows the throughput for Transmission torrents +graph_info This graph shows the throughput for Transmission torrents on ${host} down.label throughput down.type COUNTER -down.draw LINE1 +down.draw AREA down.min 0 down.graph no -up.label sent +up.label Bps up.negative down up.type COUNTER -up.draw LINE1 +up.draw AREA up.min 0 multigraph ${plugin_name}_activity -graph_title Transmission activity for ${host} +graph_title Transmission activity${title_host} graph_vlabel torrents graph_args --base 1000 graph_category network -graph_info This graph shows the number of Transmission torrents -total.label total -total.draw AREA -total.min 0 -total.colour AFE3FF +graph_info This graph shows the number of Transmission torrents on ${host} active.label active active.draw AREA active.min 0 -active.colour 77FF6F +active.colour COLOUR0 paused.label paused -paused.draw LINE1 +paused.draw STACK paused.min 0 -paused.colour 8F8F8F +paused.colour COLOUR8 """) - print conf.safe_substitute(plugin_name=plugin_name, host=host) + print conf.safe_substitute(plugin_name=plugin_name, host=host, title_host=title_host) sys.exit(0) @@ -135,10 +133,9 @@ def autoconf(): print 'no python module \'transmissionrpc\' missing' - def fetch(): import transmissionrpc - + try: client = transmissionrpc.Client(host, port=port, user=user, password=passwd) except transmissionrpc.TransmissionError, err: @@ -148,20 +145,15 @@ def fetch(): stats = client.session_stats(10) print_values_throughput(stats) print_values_activity(stats) - - + + def print_values_activity(stats): print "multigraph {plugin_name}_activity".format(plugin_name=plugin_name) - try: - print "total.value %s" % stats.torrentCount - except: - print "total.value U" - try: print "active.value %s" % stats.activeTorrentCount except: print "active.value U" - + try: print "paused.value %s" % stats.pausedTorrentCount except: @@ -174,12 +166,11 @@ def print_values_throughput(stats): print "down.value %s" % stats.cumulative_stats['downloadedBytes'] except: print "down.value U" - + try: print "up.value %s" % stats.cumulative_stats['uploadedBytes'] except: print "up.value U" - def dumpstats(): @@ -192,16 +183,16 @@ def dumpstats(): stats = client.session_stats(10) print stats - + if __name__ == '__main__': - if len(sys.argv)>1 : - if sys.argv[1]=="dumpstats" : - dumpstats() - elif sys.argv[1]=="config" : + if len(sys.argv) > 1 : + if sys.argv[1] == "dumpstats" : + dumpstats() + elif sys.argv[1] == "config" : config() - elif sys.argv[1]=="autoconf" : + elif 
sys.argv[1] == "autoconf" : autoconf() - elif sys.argv[1]!="": + elif sys.argv[1] != "": raise ValueError, "unknown parameter '%s'" % sys.argv[1] fetch() diff --git a/plugins/network/ubiquiti_airfiber_ b/plugins/network/ubiquiti_airfiber_ old mode 100644 new mode 100755 index 96e04ca2..233194fc --- a/plugins/network/ubiquiti_airfiber_ +++ b/plugins/network/ubiquiti_airfiber_ @@ -4,7 +4,7 @@ # Multigraph munin plugin to monitor Ubiquiti AirOS F (airFiber) devices various parameters. It needs # Perl's Net::Telnet or Net::OpenSSH to be able to connect. # -# To use this plugin, copy it to the munin's plugin directory (eg. /usr/share/munin/plugins) +# To use this plugin, copy it to the munin's plugin directory (eg. /usr/share/munin/plugins) # under the name "ubiquiti_airfiber_". Don't change this filename! Follow these steps: # # 1. Give names to your devices, in fqdn style. Like "master.wlan" or "slave.wlan". To make the @@ -22,7 +22,7 @@ # # 3. In /etc/munin/plugin-conf.d/munin-node add the following, to be able to contact # those devices via telnet (obviously replacing these with your own data): -# +# # [ubiquiti_airos_master.wlan] # user root # User and Group are required only if using any of the SSH modes to store # group root # the keys in /root/.ssh directory (or any user with homedir and shell) @@ -53,12 +53,12 @@ # # 5. Restart the munin node by 'service munin-node restart'. # -# If all went well, after 5 minutes or so you should have two additional nodes listed +# If all went well, after 5 minutes or so you should have two additional nodes listed # on the Web Interface of munin. # -# To use the script with public keys authentication and no password, set env.NetMode SSHkey, and -# create a pair of keys using command 'sudo ssh-keygen -t rsa'. This will generate in /root/.ssh -# directory two files, id_rsa and id_rsa.pub. Upload id_rsa.pub to your Ubiquiti device using +# To use the script with public keys authentication and no password, set env.NetMode SSHkey, and +# create a pair of keys using command 'sudo ssh-keygen -t rsa'. This will generate in /root/.ssh +# directory two files, id_rsa and id_rsa.pub. Upload id_rsa.pub to your Ubiquiti device using # Services > SSH Server > Authorized Keys window. Try to log in to the device by command line # first ('sudo ssh foobar@slave.wlan'), to save the RSA key fingerprint in the root account. # @@ -82,7 +82,7 @@ use warnings; my $NetMode = $ENV{'NetMode'}; if ($NetMode =~ /Telnet/) { use Net::Telnet; -} +} elsif ($NetMode =~ /SSH/) { use Net::OpenSSH; } @@ -350,20 +350,20 @@ if ($NetMode =~ /Telnet/) { elsif ($NetMode =~ /SSHPass/) { ## Initiate SSH Session using password authentication - $SSH = Net::OpenSSH->new($Hostname, + $SSH = Net::OpenSSH->new($Hostname, port => $Port, - user => $User, + user => $User, password => $Pass, timeout => 10, # master_stderr_discard => 1, master_opts => [-o => "StrictHostKeyChecking=no"]); - $SSH->error and + $SSH->error and # warn "Couldn't establish SSH connection: " . 
$SSH->error; &printResults(); # Nothing happens, except printing undefined results to munin } elsif ($NetMode =~ /SSHKey/) { - ## Initiate SSH Session using public key authentication + ## Initiate SSH Session using public key authentication $SSH = Net::OpenSSH->new($Hostname, port => $Port, user => $User, @@ -541,8 +541,8 @@ $ping_timelo = ($1 / 1000) if ($pinglo =~ m@min/avg/max.*\s\d+(?:\.\d+)?/(\d+(?: $packet_losslo = $1 if ($pinglo =~ /(\d+)% packet loss/); -chomp($load, $uptime, $ping_time, $packet_loss, $cpuuser, $cpusystem, $cpunice, $cpuidle, $cpuiowait, $cpuirq, $cpusoftirq, $ping_timelo, $packet_losslo, - $rxrate, $txrate, $rxpower0, $rxpower1, $powerout, $feet, $dist, $temp0, $temp1, +chomp($load, $uptime, $ping_time, $packet_loss, $cpuuser, $cpusystem, $cpunice, $cpuidle, $cpuiowait, $cpuirq, $cpusoftirq, $ping_timelo, $packet_losslo, + $rxrate, $txrate, $rxpower0, $rxpower1, $powerout, $feet, $dist, $temp0, $temp1, $rssi0, $rssi1, $baseline, $fade, $txfreq, $rxfreq, $txmodrate, $speed); &printResults(); diff --git a/plugins/network/ubiquiti_airos_ b/plugins/network/ubiquiti_airos_ old mode 100644 new mode 100755 index f1e7351d..9be5cca0 --- a/plugins/network/ubiquiti_airos_ +++ b/plugins/network/ubiquiti_airos_ @@ -4,7 +4,7 @@ # Multigraph munin plugin to monitor Ubiquiti AirOS devices various parameters. It needs # Perl's Net::Telnet or Net::OpenSSH to be able to connect. # -# To use this plugin, copy it to the munin's plugin directory (eg. /usr/share/munin/plugins) +# To use this plugin, copy it to the munin's plugin directory (eg. /usr/share/munin/plugins) # under the name "ubiquiti_airos_". Don't change this filename! Follow these steps: # # 1. Give names to your devices, in fqdn style. Like "apo.wlan" or "cli.wlan". To make the @@ -53,12 +53,12 @@ # # 5. Restart the munin node by 'service munin-node restart'. # -# If all went well, after 5 minutes or so you should have two additional nodes listed +# If all went well, after 5 minutes or so you should have two additional nodes listed # on the Web Interface of munin. # -# To use the script with public keys authentication and no password, set env.NetMode SSHkey, and -# create a pair of keys using command 'sudo ssh-keygen -t rsa'. This will generate in /root/.ssh -# directory two files, id_rsa and id_rsa.pub. Upload id_rsa.pub to your Ubiquiti device using +# To use the script with public keys authentication and no password, set env.NetMode SSHkey, and +# create a pair of keys using command 'sudo ssh-keygen -t rsa'. This will generate in /root/.ssh +# directory two files, id_rsa and id_rsa.pub. Upload id_rsa.pub to your Ubiquiti device using # Services > SSH Server > Authorized Keys window. Try to log in to the device by command line # first ('sudo ssh foobar@cli.wlan'), to save the RSA key fingerprint in the root account. 
# @@ -89,7 +89,7 @@ use warnings; my $NetMode = $ENV{'NetMode'}; if ($NetMode =~ /Telnet/) { use Net::Telnet; -} +} elsif ($NetMode =~ /SSH/) { use Net::OpenSSH; } @@ -103,7 +103,7 @@ chomp ($HostLo); ############################################################################## ## Define variables my $graph_period = "second"; -my ($load, $uptime, $ping_time, $packet_loss, $amc, $amq, $conn, $mt, $mf, $mb, $errlrcvd, $errltrans, $errwrcvd, $errwtrans, $ping_timelo, $packet_losslo); +my ($load, $uptime, $ping_time, $packet_loss, $amc, $amq, $conn, $mt, $mf, $mb, $errlrcvd, $errltrans, $errwrcvd, $errwtrans, $ping_timelo, $packet_losslo); my ($trflup, $trfldown, $trfwup, $trfwdown, $freq, $txccq, $acttimeout, $txsignal, $noisefloor, $txrate, $rxrate, $errnwid); my ($errcrypt, $errfrag, $errretries, $errbmiss, $errother, $cpuuser, $cpusystem, $cpunice, $cpuidle, $cpuiowait, $cpuirq, $cpusoftirq); $load = $uptime = $ping_time = $packet_loss = $amc = $amq = $conn = $mt = $mf = $mb = $errlrcvd = $errltrans = $errwrcvd = $errwtrans = $ping_timelo = $packet_losslo = "U"; @@ -454,20 +454,20 @@ if ($NetMode =~ /Telnet/) { elsif ($NetMode =~ /SSHPass/) { ## Initiate SSH Session using password authentication - $SSH = Net::OpenSSH->new($Hostname, + $SSH = Net::OpenSSH->new($Hostname, port => $Port, - user => $User, + user => $User, password => $Pass, timeout => 10, # master_stderr_discard => 1, master_opts => [-o => "StrictHostKeyChecking=no"]); - $SSH->error and + $SSH->error and # warn "Couldn't establish SSH connection: " . $SSH->error; &printResults(); # Nothing happens, except printing undefined results to munin } elsif ($NetMode =~ /SSHKey/) { - ## Initiate SSH Session using public key authentication + ## Initiate SSH Session using public key authentication $SSH = Net::OpenSSH->new($Hostname, port => $Port, user => $User, @@ -666,8 +666,8 @@ my $pinglo = join(" ", @pinglo); $ping_timelo = ($1 / 1000) if ($pinglo =~ m@min/avg/max.*\s\d+(?:\.\d+)?/(\d+(?:\.\d+)?)/\d+(?:\.\d+)?@); $packet_losslo = $1 if ($pinglo =~ /(\d+)% packet loss/); -chomp($load, $uptime, $ping_time, $packet_loss, $amc, $amq, $conn, $mt, $mf, $mb, $errlrcvd, $errltrans, $ping_timelo, $packet_losslo, - $errwrcvd, $errwtrans, $trflup, $trfldown, $trfwup, $trfwdown, $freq, $txccq, $acttimeout, $txsignal, +chomp($load, $uptime, $ping_time, $packet_loss, $amc, $amq, $conn, $mt, $mf, $mb, $errlrcvd, $errltrans, $ping_timelo, $packet_losslo, + $errwrcvd, $errwtrans, $trflup, $trfldown, $trfwup, $trfwdown, $freq, $txccq, $acttimeout, $txsignal, $noisefloor, $txrate, $rxrate, $errnwid, $errcrypt, $errfrag, $errretries, $errbmiss, $errother, $cpuuser, $cpusystem, $cpunice, $cpuidle, $cpuiowait, $cpuirq, $cpusoftirq); @@ -688,7 +688,7 @@ sub printResults { print "multigraph airos_ack\n"; print "acttimeout.value " . $acttimeout . "\n"; print "\n"; - + print "multigraph airos_dbm\n"; print "txsignal.value " . $txsignal . "\n"; print "noisefloor.value " . $noisefloor . 
"\n"; diff --git a/plugins/network/umts_sig b/plugins/network/umts_sig index b66a9b55..674a697e 100755 --- a/plugins/network/umts_sig +++ b/plugins/network/umts_sig @@ -1,5 +1,5 @@ #!/bin/bash -# +# # A Munin Plugin to show umts signal strength using gcom # Created by Derik Vercueil # Based on a work of "auth" diff --git a/plugins/network/upnpc_ b/plugins/network/upnpc_ index e3c933c5..36ce6fbd 100755 --- a/plugins/network/upnpc_ +++ b/plugins/network/upnpc_ @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/sh -u # -*- sh -*- : << =cut @@ -8,7 +8,8 @@ upnpc_ - Plugin to monitor routers via UPnP This plugin uses the upnpc utility (package miniupnpc in Debian), to monitor an -router using UPnP. It can monitor the following aspects, and plot them as separate graphs: +router using UPnP. It can monitor the following aspects, and plot them as +separate graphs, or a single multigraph (if linked at upnpc or upnpc_multi: * uptime: how long the link has been up; * bitrate: the up and downlink bitrate (e.g., sync speed for DSL); * traffic: the actual up and downstream traffic rate; @@ -16,19 +17,38 @@ router using UPnP. It can monitor the following aspects, and plot them as separa =head1 APPLICABLE SYSTEMS -Linux systems with upnpc installed. +Linux systems with upnpc installed (miniupnpc package). =head1 CONFIGURATION -None needed. +If you do not want to show the link maximum bitrates, add the following +plugin-configuration: + + [upnpc*] + env.traffic_remove_max true + +You can display the graph on another host (e.g., the actual router) than the +one running upnpc. To do so, first configure the plugin to use a different +hostname. + + env.host_name router + +Then configure munin (in /etc/munin/munin-conf or /etc/munin/munin-conf.d), to +support a new host. + + [example.net;router] + address 127.0.0.1 + use_node_name no =head1 AUTHOR -Olivier Mehani +Olivier Mehani + +Copyright (C) 2016,2019 Olivier Mehani =head1 LICENSE -GPLv2 +SPDX-License-Identifier: GPL-3.0-or-later =head1 MAGIC MARKERS @@ -37,24 +57,55 @@ GPLv2 =cut -autoconf() { - which upnpc >/dev/null && upnpc -s >/dev/null 2>&1 && echo yes || echo "no (No upnpc or no UPnP router)" +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x +fi + +PLUGIN_NAME="$(basename "${0}")" +MODE="$(echo "${PLUGIN_NAME}" | sed 's/.*_//')" +# If called without a mode, default to multigraph +[ "$MODE" = "upnpc" ] && MODE="multi" + +get_data() { + if ! command -v upnpc >/dev/null; then + echo "upnpc not found (miniupnpc package)" >&2 + exit 1 + fi + + upnpc -s } -suggest () { - upnpc -s | sed -n " \ - s/.*uptime=.*/uptime/p; \ - s/.*MaxBitRate.*/bitrate/p; \ +get_supported_modes() { + DATA=$1 + echo "${DATA}" | sed -n " \ s/.*Bytes.*/traffic/p; \ s/.*Packets.*/pkts/p; \ - " + s/.*uptime=.*/uptime/p; \ + " +} + +autoconf() { + if ! 
command -v upnpc >/dev/null; then + echo "no (upnpc not found [miniupnpc package])" + return + fi + upnpc -s 2>/dev/null | grep -q 'List.*devices.*found' && echo yes \ + || echo "no (No UPnP router detected)" +} + + +suggest () { + for mode in ${SUPPORTED_MODES}; do + echo "${mode}" + done + echo "multi" } config () { - case $1 in + case ${1} in "uptime") cat << EOF -graph_title Uplink connection uptime +graph_title Uplink connection uptime${HOST_TITLE} graph_args -l 0 graph_category network graph_scale no @@ -62,46 +113,62 @@ graph_vlabel uptime in hours uptime.label uptime uptime.draw AREA uptime.cdef uptime,3600,/ +${HOST_NAME} EOF ;; "bitrate") cat << EOF -graph_title Uplink bitrate +graph_title [DEPRECATED] Uplink bitrate${HOST_TITLE} graph_args --base 1000 -l 0 graph_category network graph_vlabel bitrate down (-) / up (+) down.label bps -down.warning 4194304: -down.critical 1048576: up.label bps -up.warning 524288: -up.critical 131072: down.graph no up.negative down +${HOST_NAME} EOF ;; "traffic") cat << EOF -graph_title Uplink traffic -graph_args --base 1024 -l 0 +graph_title Uplink traffic${HOST_TITLE} +graph_args --base 1000 -l 0 graph_category network -graph_vlabel bytes in (-) / out (+) per ${graph_period} -down.label Bps +graph_vlabel bits per second in (-) / out (+) +EOF + if [ "${traffic_remove_max:-false}" != 'true' ]; then + cat << EOF +maxdown.label bps (max) +maxup.label bps (max) +maxdown.graph no +maxup.negative maxdown +EOF + fi + cat << EOF +down.label bps down.type DERIVE down.min 0 -up.label Bps +down.cdef down,8,* +up.label bps up.type DERIVE up.min 0 +up.cdef up,8,* down.graph no up.negative down +${HOST_NAME} EOF ;; "pkts") + # ${graph_period} is not a shell variable cat << EOF -graph_title Uplink packets +graph_title Uplink packets${HOST_TITLE} graph_args --base 1000 -l 0 graph_category network -graph_vlabel packets in (-) / out (+) per ${graph_period} +EOF + # ${graph_period} is not a shell variable + # shellcheck disable=SC2016 + echo 'graph_vlabel packets in (-) / out (+) per ${graph_period}' + cat << EOF down.label pps down.type DERIVE down.min 0 @@ -110,47 +177,94 @@ up.type DERIVE up.min 0 down.graph no up.negative down +${HOST_NAME} EOF ;; - "*") - echo "$0: unknown mode '$1'" >&2 + "multi") + echo "${HOST_NAME}" + # Don't repeat HOST_NAME in sub-configs + HOST_NAME="" + echo "multigraph ${PLUGIN_NAME}" + config "traffic" + for mode in ${SUPPORTED_MODES}; do + echo "multigraph ${PLUGIN_NAME}.${mode}" + config "${mode}" + done + ;; + *) + echo "unknown mode '${1}'" >&2 exit 1 - esac + ;; + esac } fetch () { - case $1 in + case "${1}" in "uptime") - upnpc -s | sed -n "s/.*uptime=\([0-9]\+\)s.*/uptime.value \1/p" + echo "${DATA}" | sed -n "s/.*uptime=\([0-9]\+\)s.*/uptime.value \1/p" ;; "bitrate") - upnpc -s | sed -n "s/^MaxBitRateDown : \([0-9]\+\) bps.*MaxBitRateUp \([0-9]\+\) bps.*/down.value \1\nup.value \2/p" + echo "${DATA}" | sed -n "s/^MaxBitRateDown : \([0-9]\+\) bps.*MaxBitRateUp \([0-9]\+\) bps.*/down.value \1\nup.value \2/p" ;; "traffic") - upnpc -s | sed -n "s/^Bytes:\s*Sent:\s*\([0-9]\+\).*Recv:\s*\([0-9]\+\).*/up.value \1\ndown.value \2/p" + echo "${DATA}" | sed -n " + s/^Bytes:\s*Sent:\s*\([0-9]\+\).*Recv:\s*\([0-9]\+\).*/up.value \1\ndown.value \2/p" + if [ "${traffic_remove_max:-false}" != 'true' ]; then + echo "${DATA}" | sed -n " + s/^MaxBitRateDown : \([0-9]\+\) bps.*MaxBitRateUp \([0-9]\+\) bps.*/maxdown.value \1\nmaxup.value \2/p" + fi ;; "pkts") - upnpc -s | sed -n "s/^Packets:\s*Sent:\s*\([0-9]\+\).*Recv:\s*\([0-9]\+\).*/up.value 
\1\ndown.value \2/p" + echo "${DATA}" | sed -n "s/^Packets:\s*Sent:\s*\([0-9]\+\).*Recv:\s*\([0-9]\+\).*/up.value \1\ndown.value \2/p" ;; - "*") - echo "$0: unknown mode '$1'" >&2 + "multi"|"upnpc") + echo "multigraph ${PLUGIN_NAME}" + fetch "traffic" + for mode in ${SUPPORTED_MODES}; do + echo "multigraph ${PLUGIN_NAME}.${mode}" + fetch "${mode}" + done + ;; + *) + echo "unknown mode '${1}'" >&2 exit 1 - esac + ;; + esac } -mode=`echo $0 | sed 's/.*_//'` +if [ "${1:-}" = "autoconf" ]; then + autoconf + exit 0 +fi -case $1 in - "autoconf") - autoconf - ;; +# do data-based detection here, rather than in +# config() as we don't want to do this multiple times +# when the function calls itself for multigraphs +DATA=$(get_data) +SUPPORTED_MODES=$(get_supported_modes "${DATA}") + +HOST=${host_name:-} +HOST_TITLE="" +HOST_NAME="host_name ${HOST}" +if [ -z "${HOST}" ]; then + HOST=$(echo "${DATA}" | sed -n "s#.*desc: http://\([^/:]\+\).*#\1#p") + # Only add the host name to the title if autodetected + HOST_TITLE=" ($HOST)" + # ...but not as a separate host + HOST_NAME="" +fi + +case ${1:-} in "suggest") suggest ;; "config") - config $mode + config "${MODE}" + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then + fetch "${MODE}" + fi ;; *) - fetch $mode + fetch "${MODE}" ;; esac diff --git a/plugins/network/vnstat b/plugins/network/vnstat index 65a52f8d..afdaf9f3 100755 --- a/plugins/network/vnstat +++ b/plugins/network/vnstat @@ -26,7 +26,7 @@ if [ "$1" = "config" ]; then echo 'totalrx.info Total data received.' echo 'totalrx.cdef totalrx,1000000,*' exit 0 - + fi; diff --git a/plugins/network/vnstat_ b/plugins/network/vnstat_ index 9fb9c039..0094ca13 100755 --- a/plugins/network/vnstat_ +++ b/plugins/network/vnstat_ @@ -11,7 +11,7 @@ # # 2009.09.28 _KaszpiR_ # - quick an ddirty update to support multiple interfaces, for example symlink vnstat_ to vnstat_eth0 and vnstat_eth1 -# rember to run before that vnstat -u -i eth0 and vnstat -u -i eth1 to build databases (read manual of vnstat) +# remember to run before that vnstat -u -i eth0 and vnstat -u -i eth1 to build databases (read manual of vnstat) # other updates of this script maybe soon @@ -34,7 +34,7 @@ if [ "$1" = "config" ]; then echo 'totalrx.info Total data received.' echo 'totalrx.cdef totalrx,1000000,*' exit 0 - + fi; diff --git a/plugins/network/zenus_ b/plugins/network/zenus_ index 319bf2fd..561a7e93 100755 --- a/plugins/network/zenus_ +++ b/plugins/network/zenus_ @@ -126,7 +126,7 @@ sub save_data { # Push the hash values on to the array foreach ( keys %$hashref ) { - push @save_vector, $_ . '' . $hashref->{$_}; + push @save_vector, $_ . '¬' . 
$hashref->{$_}; } $::logger->info( @@ -148,7 +148,7 @@ sub load_data { my $hashref; foreach (@save_vector) { - my ( $key, $value ) = split //; + my ( $key, $value ) = split /¬/; $hashref->{$key} = $value; } my $force_save = 0; @@ -233,9 +233,9 @@ sub load_data { if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { if ($ret) { print "no ($ret)\n"; - exit 1; + } else { + print "yes\n"; } - print "yes\n"; exit 0; } @@ -262,7 +262,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "config" ) { print <{name} upload.label Uploaded diff --git a/plugins/newznab/nn_ b/plugins/newznab/nn_ index 0bb1c601..41f13395 100755 --- a/plugins/newznab/nn_ +++ b/plugins/newznab/nn_ @@ -101,7 +101,7 @@ $graphs{releases} = { }, data_source_attrs => { min => '0', - + }, }, data_sources => [ @@ -254,7 +254,7 @@ sub main { 'config' => \&config, 'show' => \&show, ); - + die "Unknown command: $command" unless exists $commands{$command}; return $commands{$command}->($graph); } @@ -265,7 +265,7 @@ sub show { unless $graphs{$graph_name}; my $graph = $graphs{$graph_name}; - run_queries($graph_name); + run_queries($graph_name); for my $ds (@{$graph->{data_sources}}) { printf "%s.value %s\n", clean_fieldname($ds->{label}), ($data->{$ds->{name}} ? $data->{$ds->{name}} : '0'); } @@ -298,22 +298,22 @@ sub run_queries { } sub update_requests { - my ($dbh) = @_; + my ($dbh) = @_; my %queries = ( - request => 'select count(*) as requests from userrequests where timestamp > now() - INTERVAL 5 MINUTE;', - download => 'select count(*) as downloads from userdownloads where timestamp > now() - INTERVAL 5 MINUTE;', - ); + request => 'select count(*) as requests from userrequests where timestamp > now() - INTERVAL 5 MINUTE;', + download => 'select count(*) as downloads from userdownloads where timestamp > now() - INTERVAL 5 MINUTE;', + ); for my $name ( qw(request download) ) { - my $query = $queries{$name}; - my $sth = $dbh->prepare($query); - $sth->execute(); - while (my $row = $sth->fetch) { + my $query = $queries{$name}; + my $sth = $dbh->prepare($query); + $sth->execute(); + while (my $row = $sth->fetch) { $data->{$name} = $row->[0]; } - $sth->finish(); - } + $sth->finish(); + } } - + sub update_category { my ($dbh) = @_; my $sth = $dbh->prepare('select count(*) as releases, category.title, category.id from releases LEFT JOIN category ON releases.categoryID = category.ID group by releases.categoryID'); @@ -322,7 +322,7 @@ sub update_category { while (my $row = $sth->fetch) { $data->{$row->[2]} = $row->[0]; } - + } sub update_releases { my ($dbh) = @_; @@ -331,7 +331,7 @@ sub update_releases { $sth->execute(); }; if ($@) { die $@; } - my $row = $sth->fetchrow_hashref(); + my $row = $sth->fetchrow_hashref(); $data->{releases} = $row->{'releases'}; $sth->finish(); } @@ -343,7 +343,7 @@ sub update_users { $sth->execute(); }; if ($@) { die $@; } - my $row = $sth->fetchrow_hashref(); + my $row = $sth->fetchrow_hashref(); $data->{users} = $row->{'users'}; $sth->finish(); } diff --git a/plugins/nextcloud/nextcloud_ b/plugins/nextcloud/nextcloud_ new file mode 100755 index 00000000..e7e57fcd --- /dev/null +++ b/plugins/nextcloud/nextcloud_ @@ -0,0 +1,235 @@ +#!/bin/sh + +set -e + +: << =cut + +=head1 NAME + +nextcloud_ - Monitor usage of nextcloud instances + +=head1 APPLICABLE SYSTEMS + +Nexcloud instances + +=head1 CONFIGURATION + +Requires installed curl and jq, a command-line json processor. + +This is a wildcard plugin. To monitor a nextcloud instance, link +nextcloud_ to this file. You can even append a port +(:8443) to the file if needed. 
For example, + + ln -s /usr/share/munin/plugins/nextcloud_ \ + /etc/munin/plugins/nextcloud_cloud.domain.tld + +Set username and password in your munin-node configuration + +[nextcloud_cloud.domain.tld] +env.username +env.password + +It's advised to set an app password (for this plugin) in your nextcloud +instance and not to use the "real" password of your nextcloud user. + +=head1 AUTHOR + +Copyright (C) 2020 Sebastian L. (https://momou.ch) + +=head1 LICENSE + +GPLv2 + +=head1 MAGIC MARKERS + + #%# family=manual + #%# capabilities=autoconf + +=cut + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +API_PATH="/ocs/v2.php/apps/serverinfo/api/v1/info?format=json" +DOMAIN="${0##*nextcloud_}" +CLEANDOMAIN="$(clean_fieldname "$DOMAIN")" +USERNAME="${username:-}" +PASSWORD="${password:-}" + +print_json_data() { + local FIRST="$1" + [ -z "$FIRST" ] && exit 0 + shift 1 + for KEY in "$@"; do + VALUE=$(echo "$FIRST" | jq -cr ".$KEY") + echo "$KEY.value $VALUE" + done +} + +test_https() { + [ -z "$DOMAIN" ] && exit 0 + curl -s -f -m 2 -I "https://$DOMAIN" > /dev/null && echo true && exit 0 +} + +case $1 in + + autoconf) + if [ -x /usr/bin/curl ]; then + if [ -x /usr/bin/jq ]; then + [ "$(test_https)" ] && DOMAIN="https://$DOMAIN" || DOMAIN="http://$DOMAIN" + curl -s -f -m 2 -u "$USERNAME:$PASSWORD" -I "$DOMAIN$API_PATH" | grep -iq "Content-Type: application/json" && echo "yes" && exit 0 || echo "no (invalid or empty response from nextlcoud serverinfo api)" && exit 0 + else + echo "no (jq not found)" && exit 0 + fi + else + echo "no (/usr/bin/curl not found)" && exit 0 + fi + ;; + + config) + +cat << EOM +multigraph nextcloud_users_$CLEANDOMAIN +graph_title Nextcloud users on $DOMAIN +graph_args --base 1000 -l 0 +graph_printf %.0lf +graph_vlabel connected users +graph_info number of connected user +graph_category cloud +last5minutes.label last 5 minutes +last5minutes.info users connected in the last 5 minutes +last5minutes.min 0 +last1hour.label last hour +last1hour.info users connected in the last hour +last1hour.min 0 +last24hours.label last 24 hours +last24hours.info users connected in the last 24 hours +last24hours.min 0 +num_users.label number of users +num_users.info total number of users +num_users.min 0 +multigraph nextcloud_files_$CLEANDOMAIN +graph_title Nextcloud files on $DOMAIN +graph_args --base 1000 -l 0 +graph_printf %.0lf +graph_vlabel number of files +graph_info number of files +graph_category cloud +num_files.label number of files +num_files.info current number of files +num_files.min 0 +multigraph nextcloud_shares_$CLEANDOMAIN +graph_title Nextcloud shares on $DOMAIN +graph_args --base 1000 -l 0 +graph_printf %.0lf +graph_vlabel number of shares +graph_info number of shares +graph_category cloud +num_shares.label total number of shares +num_shares.info current over all total of shares +num_shares.min 0 +num_shares_user.label user shares +num_shares_user.info current total of user shares +num_shares_user.min 0 +num_shares_groups.label group shares +num_shares_groups.info current total of group shares +num_shares_groups.min 0 +num_shares_link.label link shares +num_shares_link.info current total of link shares +num_shares_link.min 0 +num_shares_mail.label mail shares +num_shares_mail.info current total of mail shares +num_shares_mail.min 0 +num_shares_room.label room shares +num_shares_room.info current total of room shares +num_shares_room.min 0 +num_shares_link_no_password.label link shares without password protection +num_shares_link_no_password.info current total of link shares without 
password protection +num_shares_link_no_password.min 0 +num_fed_shares_sent.label federated shares sent +num_fed_shares_sent.info current total of federated shares sent +num_fed_shares_sent.min 0 +num_fed_shares_received.label federated shares received +num_fed_shares_received.info current total of federated shares received +num_fed_shares_received.min 0 +multigraph nextcloud_dbsize_$CLEANDOMAIN +graph_title Nextcloud database size on $DOMAIN +graph_args --base 1024 -l 0 +graph_vlabel size in bytes +graph_info database database size in bytes +graph_category cloud +db_size.label database size in bytes +db_size.info database size in bytes +db_size.draw AREA +db_size.min 0 +multigraph nextcloud_storages_$CLEANDOMAIN +graph_title Nextcloud storages on $DOMAIN +graph_args --base 1000 -l 0 +graph_printf %.0lf +graph_vlabel number +graph_info number of storages +graph_category cloud +num_storages.label total number of storages +num_storages.info current total of storages +num_storages.min 0 +num_storages_local.label number of local storages +num_storages_local.info current number of local storages +num_storages_local.min 0 +num_storages_home.label number of home storages +num_storages_home.info current number of home storages +num_storages_home.min 0 +num_storages_other.label number of other storages +num_storages_other.info current number of other storages +num_storages_other.min 0 +multigraph nextcloud_apps_$CLEANDOMAIN +graph_title Nextcloud apps on $DOMAIN +graph_args --base 1000 -l 0 +graph_printf %.0lf +graph_vlabel apps +graph_info number of installed and updatable apps +graph_category cloud +num_updates_available.label available app updates +num_updates_available.info number of available app updates +num_updates_available.min 0 +num_updates_available.warning 1 +num_installed.label installed apps +num_installed.info number of installed apps +num_installed.min 0 +EOM + exit 0 + ;; + +esac + +# Get JSON data +[ "$(test_https)" ] && DOMAIN="https://$DOMAIN" || DOMAIN="http://$DOMAIN" +JSONSTATS=$(curl -s -f -m 2 -u "$USERNAME:$PASSWORD" "$DOMAIN$API_PATH" | sed 's/\\/\\\\/g' | jq -cr ".ocs.data") +USERS=$(echo "$JSONSTATS" | jq -cr ".activeUsers") +STORAGE=$(echo "$JSONSTATS" | jq -cr ".nextcloud.storage") +SHARES=$(echo "$JSONSTATS" | jq -cr ".nextcloud.shares") +DBSIZE=$(echo "$JSONSTATS" | jq -cr ".server.database.size") +APPS=$(echo "$JSONSTATS" | jq -cr ".nextcloud.system.apps") + +# users +echo "multigraph nextcloud_users_$CLEANDOMAIN" +print_json_data "$USERS" last5minutes last1hour last24hours +print_json_data "$STORAGE" num_users + +# files +echo "multigraph nextcloud_files_$CLEANDOMAIN" +print_json_data "$STORAGE" num_files + +# storages +echo "multigraph nextcloud_storages_$CLEANDOMAIN" +print_json_data "$STORAGE" num_storages num_storages_local num_storages_home num_storages_other + +# shares +echo "multigraph nextcloud_shares_$CLEANDOMAIN" +print_json_data "$SHARES" num_shares num_shares_user num_shares_groups num_shares_link num_shares_mail num_shares_room num_shares_link_no_password num_fed_shares_sent num_fed_shares_received + +# dbsize +echo "multigraph nextcloud_dbsize_$CLEANDOMAIN" +echo "db_size.value $DBSIZE" + +# apps +echo "multigraph nextcloud_apps_$CLEANDOMAIN" +print_json_data "$APPS" num_installed num_updates_available diff --git a/plugins/nfs-freebsd/nfs_client b/plugins/nfs-freebsd/nfs_client old mode 100644 new mode 100755 index 312fb6f2..ef930f4e --- a/plugins/nfs-freebsd/nfs_client +++ b/plugins/nfs-freebsd/nfs_client @@ -37,16 +37,16 @@ if [ "$1" = "autoconf" 
]; then fi fi -labels=`$NFSSTAT -c | grep -iv "[0-9]" | grep -v ":" | sed 's/X\ /x_/' | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26}' | tr '[A-Z]' '[a-z]'` -values=`$NFSSTAT -c | grep -i "[0-9]" | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26}'` +labels=$("$NFSSTAT" -c | grep -iv "[0-9]" | grep -v ":" | sed 's/X\ /x_/' | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26}' | tr '[:upper:]' '[:lower:]') +values=$("$NFSSTAT" -c | grep -i "[0-9]" | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26}') -larray=( $labels ) -varray=( $values ) +read -r -a larray <<<"$labels" +read -r -a varray <<<"$values" if [ "$1" = "config" ]; then echo 'graph_title NFSv3 Client' echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel requests / ${graph_period}' + echo "graph_vlabel requests / \${graph_period}" echo 'graph_total total' echo 'graph_category fs' for a in $labels; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done diff --git a/plugins/nfs-freebsd/nfs_client_cache b/plugins/nfs-freebsd/nfs_client_cache old mode 100644 new mode 100755 index 9c5d3441..3be86392 --- a/plugins/nfs-freebsd/nfs_client_cache +++ b/plugins/nfs-freebsd/nfs_client_cache @@ -37,16 +37,16 @@ if [ "$1" = "autoconf" ]; then fi fi -labels=`$NFSSTAT -c | grep -iv "[0-9]" | tail -n 2 | sed 's/BioRLHits/BioRL\ Hits/g' | awk '{print $1"_"$2,$1"_"$3,$4"_"$5,$4"_"$6,$7"_"$8,$7"_"$9,$10"_"$11,$10"_"$12}' | tr '\n' ' ' | tr '[A-Z]' '[a-z]'` -values=`$NFSSTAT -c | grep -i "[0-9]" | tail -n 2 | tr '\n' ' '` +labels=$("$NFSSTAT" -c | grep -iv "[0-9]" | tail -n 2 | sed 's/BioRLHits/BioRL\ Hits/g' | awk '{print $1"_"$2,$1"_"$3,$4"_"$5,$4"_"$6,$7"_"$8,$7"_"$9,$10"_"$11,$10"_"$12}' | tr '\n' ' ' | tr '[:upper:]' '[:lower:]') +values=$("$NFSSTAT" -c | grep -i "[0-9]" | tail -n 2 | tr '\n' ' ') -larray=( $labels ) -varray=( $values ) +read -r -a larray <<<"$labels" +read -r -a varray <<<"$values" if [ "$1" = "config" ]; then echo 'graph_title NFSv3 Client Cache' echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel requests / ${graph_period}' + echo "graph_vlabel requests / \${graph_period}" echo 'graph_total total' echo 'graph_category fs' for a in $labels; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done diff --git a/plugins/nfs-freebsd/nfsd b/plugins/nfs-freebsd/nfsd index d551b844..d2e3d848 100755 --- a/plugins/nfs-freebsd/nfsd +++ b/plugins/nfs-freebsd/nfsd @@ -38,17 +38,17 @@ if [ "$1" = "autoconf" ]; then fi fi -labels=`$NFSSTAT -s | grep -iv "[0-9]" | grep -v ":" | sed 's/Server\ //' | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30}' | tr '[A-Z]' '[a-z]'` -values=`$NFSSTAT -s | grep -i "[0-9]" | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30}'` +labels=$("$NFSSTAT" -s | grep -iv "[0-9]" | grep -v ":" | sed 's/Server\ //' | tr '\n' ' ' | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30}' | tr '[:upper:]' '[:lower:]') +values=$("$NFSSTAT" -s | grep -i "[0-9]" | tr '\n' ' ' | awk '{print 
$1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30}') -larray=( $labels ) -varray=( $values ) +read -r -a larray <<<"$labels" +read -r -a varray <<<"$values" if [ "$1" = "config" ]; then echo 'graph_title NFSv3 Server' echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel requests / ${graph_period}' + echo "graph_vlabel requests / \${graph_period}" echo 'graph_total total' echo 'graph_category fs' for a in $labels; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done diff --git a/plugins/nfs/nfsv4 b/plugins/nfs/nfsv4 index 1de3e7e0..e7fe68bc 100755 --- a/plugins/nfs/nfsv4 +++ b/plugins/nfs/nfsv4 @@ -28,7 +28,7 @@ # Set categories on a bunch of plugins # # Revision 1.3 2004/05/15 21:33:29 jimmyo -# "Upped" som plugins from contrib/manual to manual or auto. +# "Upped" some plugins from contrib/manual to manual or auto. # # Revision 1.2 2004/05/06 21:55:18 jimmyo # Added patch to contrib-plugin linux/nfsd, to graph rpc count (Alexandre Dupouy). @@ -48,11 +48,10 @@ proc="access close commit create delegpurge delegreturn getattr getfh link lock if [ "$1" = "autoconf" ]; then if [ -f "$NFSD" ]; then echo yes - exit 0 else echo "no (no $NFSD)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/nginx/nginx-cache-multi_ b/plugins/nginx/nginx-cache-multi_ index 05245ffc..97e9f25f 100755 --- a/plugins/nginx/nginx-cache-multi_ +++ b/plugins/nginx/nginx-cache-multi_ @@ -11,7 +11,7 @@ # This plugin generates two graphs - with number of requests and with percents. # Create these symlinks: # ln -s /usr/share/munin/plugins/nginx-cache-multi_ /etc/munin/plugins/nginx-cache-multi_number -# ls -s /usr/share/munin/plugins/nginx-cache-multi_ /etc/munin/plugins/nginx-cache-multi_percent +# ln -s /usr/share/munin/plugins/nginx-cache-multi_ /etc/munin/plugins/nginx-cache-multi_percent # # You can override the log file location. # diff --git a/plugins/nginx/nginx-combined b/plugins/nginx/nginx-combined index 0adf8362..c7e52085 100755 --- a/plugins/nginx/nginx-combined +++ b/plugins/nginx/nginx-combined @@ -5,12 +5,12 @@ #%# capabilities=autoconf # nginx_combine_ --- Determine the current status of Nginx # using the http_stub_status module. -# extend of nginx_status_ plugin of Antnio P. P. Almeida +# extend of nginx_status_ plugin of António P. P. Almeida -# Copyright (C) 2010 Antnio P. P. Almeida +# Copyright (C) 2010 António P. P. Almeida # Copyright (C) 2010 Minato Miray -# Author: Antnio P. P. Almeida , +# Author: António P. P. 
Almeida , # Author: Minato Miray ####################################### @@ -48,16 +48,16 @@ if ( exists $ARGV[0] and $ARGV[0] eq "autoconf" ) { if ($ret){ print "no ($ret)\n"; - exit 1; + exit 0; } - + my $ua = LWP::UserAgent->new(timeout => 30); my $response = $ua->request(HTTP::Request->new('GET',$URL)); unless ($response->is_success and $response->content =~ /server/im) { print "no (no nginx status on $URL)\n"; - exit 1; + exit 0; } else { @@ -75,15 +75,15 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) print "reqpsec.label Request/sec.\n"; print "reqpsec.info Request/sec.\n"; - print "reqpsec.draw LINE2\n"; + print "reqpsec.draw LINE2\n"; print "conpersec.label Connection/sec.\n"; print "conpersec.info Connection/sec.\n"; - print "conpersec.draw LINE2\n"; + print "conpersec.draw LINE2\n"; print "reqpcon.label Request/conn.\n"; print "reqpcon.info Request/conn.\n"; - print "reqpcon.draw LINE2\n"; + print "reqpcon.draw LINE2\n"; print "total.label Active connections\n"; print "total.info Active connections\n"; @@ -91,16 +91,16 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) print "reading.label Reading\n"; print "reading.info Reading\n"; - print "reading.draw LINE2\n"; + print "reading.draw LINE2\n"; print "writing.label Writing\n"; print "writing.info Writing\n"; - print "writing.draw LINE2\n"; + print "writing.draw LINE2\n"; print "waiting.label Waiting\n"; print "waiting.info Waiting\n"; - print "waiting.draw LINE2\n"; - + print "waiting.draw LINE2\n"; + exit 0; } @@ -145,11 +145,11 @@ if (defined $tmp2_conpsec && $tmp2_conpsec =~ /^[+-]?\d+$/ && $tmp2_conpsec > 0 $conpersec=$tmp2_conpsec-$tmp1_conpsec; } if (defined $tmp2_reqpsec && $tmp2_reqpsec =~ /^[+-]?\d+$/ && $tmp2_reqpsec > 0){ - $reqpsec=$tmp2_reqpsec-$tmp1_reqpsec; + $reqpsec=$tmp2_reqpsec-$tmp1_reqpsec; } if ($conpersec > 0){ $reqpcon=$reqpsec/$conpersec; -} +} print "reqpsec.value $reqpsec\n"; print "conpersec.value $conpersec\n"; diff --git a/plugins/nginx/nginx_byprojects/README.md b/plugins/nginx/nginx_byprojects/README.md index 7f93095f..0bde3568 100644 --- a/plugins/nginx/nginx_byprojects/README.md +++ b/plugins/nginx/nginx_byprojects/README.md @@ -2,21 +2,21 @@ Those plugins are used to monitor different projects or vhost (i.e. either different log files or using regular expression as filters) on the same web server. ## munin_byprojects_access -Count the number of hits per projects/vhost. +Count the number of hits per projects/vhost. ![byproject_access](https://www.mantor.org/~northox/misc/munin-plugins/nginx_byprojects_access1-month.png "byproject_access") ## munin_byprojects_bandwidth -Count the total bandwidth used by each projects/vhost. [Logtail](https://www.fourmilab.ch/webtools/logtail/) is required. +Count the total bandwidth used by each projects/vhost. [Logtail](https://www.fourmilab.ch/webtools/logtail/) is required. ![byproject_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_bandwidth-month.png "byproject_bandwidth") ## munin_byprojects_inout_bandwidth -Counts the in/out bandwidth used by each projects/vhost. [Logtail](https://www.fourmilab.ch/webtools/logtail/) is required. +Counts the in/out bandwidth used by each projects/vhost. [Logtail](https://www.fourmilab.ch/webtools/logtail/) is required. ![byproject_inout_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_inout_bandwidth-month.png "byproject_inout_bandwidth") ## Installation The setup is pretty straight forward. 
First you need to configure the plugin: -In your munin plugin configuration file (for example, a new dedicated /etc/munin/plugin-conf.d/nginx_byprojects), configure the plugins : +In your munin plugin configuration file (for example, a new dedicated /etc/munin/plugin-conf.d/nginx_byprojects), configure the plugins: [byprojects_*] env.logtail /usr/local/bin/logtail @@ -35,6 +35,6 @@ Then link the file just as any other plugins. ln -s /usr/local/sbin/ /usr/local/etc/munin/plugins/ And restart the munin-node service. - + ## License MIT diff --git a/plugins/nginx/nginx_byprojects/byprojects_access b/plugins/nginx/nginx_byprojects/byprojects_access index bc400504..f1246b62 100755 --- a/plugins/nginx/nginx_byprojects/byprojects_access +++ b/plugins/nginx/nginx_byprojects/byprojects_access @@ -4,16 +4,16 @@ use JSON qw(decode_json); # # byprojects_access # -# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files +# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files # and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. # # You need logtail (https://www.fourmilab.ch/webtools/logtail/) # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -73,7 +73,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_access.state'; - open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or die "Can't open $logtail: $!"; while () { my $buf = $_; diff --git a/plugins/nginx/nginx_byprojects/byprojects_bandwidth b/plugins/nginx/nginx_byprojects/byprojects_bandwidth index af245e96..52be2ced 100755 --- a/plugins/nginx/nginx_byprojects/byprojects_bandwidth +++ b/plugins/nginx/nginx_byprojects/byprojects_bandwidth @@ -7,19 +7,19 @@ use JSON qw(decode_json); # Perl script to monitor total bandwidth *byprojects* (e.g. vhost) from multiple # files and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. # # You need logtail (https://www.fourmilab.ch/webtools/logtail/) # -# Your nginx configuration should look like this (i.e. $request_length +# Your nginx configuration should look like this (i.e. $request_length # body_bytes_sent at the end): # log_format main '$remote_addr - $remote_user $time_local "$request" ' # '$status $body_bytes_sent "$http_referer" ' # '"$http_user_agent" $request_length $body_bytes_sent'; # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -80,7 +80,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_totalbandwidth.state'; - open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." 
-o $state |") or die "Can't open $logtail : $!"; while () { my $buf = $_; diff --git a/plugins/nginx/nginx_byprojects/byprojects_inout_bandwidth b/plugins/nginx/nginx_byprojects/byprojects_inout_bandwidth index 21b36902..17f0772c 100755 --- a/plugins/nginx/nginx_byprojects/byprojects_inout_bandwidth +++ b/plugins/nginx/nginx_byprojects/byprojects_inout_bandwidth @@ -4,22 +4,22 @@ use JSON qw(decode_json); # # byprojects_inout_bandwidth # -# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from +# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from # multiple files and/or regex. # -# Danny Fullerton +# Danny Fullerton # Mantor Organization # This work is licensed under a MIT license. # # You need logtail (https://www.fourmilab.ch/webtools/logtail/) # -# Your nginx configuration should look like this (i.e. $request_length +# Your nginx configuration should look like this (i.e. $request_length # body_bytes_sent at the end): # log_format main '$remote_addr - $remote_user $time_local "$request" ' # '$status $body_bytes_sent "$http_referer" ' # '"$http_user_agent" $request_length $body_bytes_sent'; # -# Log can be gathered from multiple sources by simply specifying multiple log +# Log can be gathered from multiple sources by simply specifying multiple log # filename or using wildcards (glob). File content can be selected using regex. # # - 'prod' => [ {'path' => '/home/prod/log/access.log'} ], @@ -83,7 +83,7 @@ foreach my $project ( keys %logs ) { my @paths = glob $log->{'path'}; foreach my $path (@paths) { my $state = $statepath.'/'.$project.$x.'_inoutbandwidth.state'; - open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or + open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or die "Can't open $logtail : $!"; while () { my $buf = $_; diff --git a/plugins/nginx/nginx_connection_request b/plugins/nginx/nginx_connection_request index 989afa64..a12171fa 100755 --- a/plugins/nginx/nginx_connection_request +++ b/plugins/nginx/nginx_connection_request @@ -112,7 +112,7 @@ my $UA = exists $ENV{'ua'} ? $ENV{'ua'} : 'nginx-status-verifier/0.1'; if (exists $ARGV[0] and $ARGV[0] eq "autoconf" ) { if ($ret) { print "no ($ret)\n"; - exit 1; + exit 0; } my $ua = LWP::UserAgent->new(timeout => 30); # Set the UA to something different from the libwww-perl. @@ -122,7 +122,7 @@ if (exists $ARGV[0] and $ARGV[0] eq "autoconf" ) { unless ($response->is_success and $response->content =~ /server/im) { print "no (no nginx status on $URL)\n"; - exit 1; + exit 0; } else { print "yes\n"; exit 0; diff --git a/plugins/nginx/nginx_error b/plugins/nginx/nginx_error index 34e47b82..a0ff11b5 100755 --- a/plugins/nginx/nginx_error +++ b/plugins/nginx/nginx_error @@ -7,21 +7,27 @@ nginx error - Munin plugin to monitor nginx error rates (http status codes per minute). + =head1 APPLICABLE SYSTEMS -Any Linux host, running nginx, with bash version > 4.0 +Any host running nginx, with bash version > 4.0 + =head1 CONFIGURATION This shows the default configuration of this plugin. You can override the log file path and the logpattern. +Additionally you may want to adjust 'group' (or 'user') based on the +permissions required for reading the log file. [nginx_error] + group adm env.logpath /var/log/nginx env.logpattern a.*.log Nginx must also be configured to log accesses in "combined" log format (default) + =head1 USAGE Link this plugin to /etc/munin/plugins/ and restart the munin-node. 
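For illustration only (assuming the default 'a.*.log' pattern and the plugin paths already used in this patch), a symlink-configured instance of nginx_error for a single virtual host might be set up roughly like this:

    # hypothetical example: the suffix after "nginx_error_" becomes the domain,
    # so this instance will look for /var/log/nginx/a.mydomaincom.log
    ln -s /usr/share/munin/plugins/nginx_error /etc/munin/plugins/nginx_error_mydomaincom
    # reload munin-node afterwards (the exact command depends on the init system)
    service munin-node restart

A plain /etc/munin/plugins/nginx_error link (no suffix) falls back to access.log, as the code below shows.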
@@ -33,51 +39,68 @@ will parse the log file /var/log/nginx/a.mydomaincom.log You can change 'env.logpattern' using asterisk ('*') to match your logs filenames. +'env.logpattern' is ignored for a non-symlink configuration. + + =head1 INTERPRETATION The plugin shows nginx http "error" status rates by parsing access log. + =head1 MAGIC MARKERS #%# family=auto #%# capabilities=autoconf + =head1 BUGS None known. + =head1 VERSION -$Id:$ +1.1 - 2018/01/20 + * add 'dirty config' capability support + * fix shell style issues reported by shellcheck + * improve readability of symlink configuration code + +1.0 - 2017/02/21 + =head1 AUTHOR vovansystems@gmail.com, 2013 + =head1 LICENSE GPLv3 =cut -if [ -z $logpath ]; then - logpath='/var/log/nginx' -fi -name=`basename $0` +set -eu -domain=${name/nginx_error/} -if [[ $domain != '_' && ${#domain} -ne 0 ]]; then - domain=${domain:1} - if [ -z $logpattern ]; then - logpattern='a.*.log' - fi - logpattern=${logpattern/\*/$domain} + +# default environment variable values +logpath=${logpath:-/var/log/nginx} + + +# derive the name of the log file from a potential symlink-configured virtual host +script_name=$(basename "$0") +plugin_suffix=${script_name#nginx_error} +if [ -n "${plugin_suffix#_}" ]; then + # a domain was given via symlink configuration: adjust the logpattern + domain=${plugin_suffix#_} + # default logpattern for symlink configuration mode + logpattern=${logpattern:-a.*.log} + log_filename=${logpattern/\*/$domain} else - logpattern='access.log' + log_filename='access.log' fi -log="$logpath/$logpattern" +log="$logpath/$log_filename" # declaring an array with http status codes, we are interested in declare -A http_codes @@ -94,44 +117,57 @@ http_codes[500]='Internal Server Error' http_codes[502]='Bad Gateway' http_codes[503]='Service Unavailable' -do_ () { # Fetch + +# parse error counts from log file +do_fetch () { + local count status_code declare -A line_counts - values=`awk '{print $9}' $log | sort | uniq -c` + values="$(awk '{print $9}' "$log" | sort | uniq -c)" + # Log files may be empty due to logrotation if [ -n "$values" ]; then - while read -r line; do - read -a tmp <<< "$line"; - line_counts[${tmp[1]}]=${tmp[0]}; + while read -r count status_code; do + line_counts[$status_code]=$count done <<< "$values" fi - for k in ${!http_codes[@]}; do - echo "error$k.value ${line_counts[$k]:-0}" + for status_code in "${!http_codes[@]}"; do + echo "error${status_code}.value ${line_counts[$status_code]:-0}" done - exit 0 } + do_config () { - echo "graph_title $logpattern - Nginx errors per minute" - echo 'graph_vlabel pages with http error codes / ${graph_period}' + local status_code + echo "graph_title $(basename "$log") - Nginx errors per minute" + echo "graph_vlabel pages with http error codes / \${graph_period}" echo "graph_category webserver" echo "graph_period minute" - echo "graph_info This graph shows nginx error amount per minute" - for k in ${!http_codes[@]}; do - echo "error$k.type DERIVE" - echo "error$k.min 0" - echo "error$k.label $k ${http_codes[$k]}" + echo "graph_info This graph shows nginx error rate per minute" + for status_code in "${!http_codes[@]}"; do + echo "error${status_code}.type DERIVE" + echo "error${status_code}.min 0" + echo "error${status_code}.label $status_code ${http_codes[$status_code]}" done - exit 0 } + do_autoconf () { echo yes - exit 0 } -case $1 in - config|autoconf|'') - eval do_$1 + +case ${1:-} in + config) + do_config + # support "dirty config" capability + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then 
do_fetch; fi + ;; + autoconf) + do_autoconf + ;; + '') + do_fetch + ;; esac exit $? diff --git a/plugins/nginx/nginx_upstream b/plugins/nginx/nginx_upstream index 5528e9a1..0bca0f8d 100755 --- a/plugins/nginx/nginx_upstream +++ b/plugins/nginx/nginx_upstream @@ -44,7 +44,7 @@ totaltimeforrequests=0 numberofrequests=0 for line in lines: m = rg.search(line) - if m: + if m: word1=m.group(1) c1=m.group(2) float1=m.group(3) diff --git a/plugins/nginx/nginx_upstream_multi_ b/plugins/nginx/nginx_upstream_multi_ index c76f85a5..455f3594 100755 --- a/plugins/nginx/nginx_upstream_multi_ +++ b/plugins/nginx/nginx_upstream_multi_ @@ -1,31 +1,36 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# vim: set fileencoding=utf-8 +#!/usr/bin/env python3 # -# Munin plugin to monitor requests number, cache statuses, http status codes and average request times of -# specified nginx upstreams. +# Munin plugin to monitor requests number, cache statuses, http status codes and average request +# times of specified nginx upstreams. # # Copyright Igor Borodikhin # # License : GPLv3 # # Configuration parameters: -# env.graphs - which graphs to produce (optional, list of graphs separated by spaces, default - cache http time request) +# env.graphs - which graphs to produce (optional, list of graphs separated by spaces, default - +# cache http time request) # env.log - log file path (mandatory, ex.: /var/log/nginx/upstream.log) -# env.upstream - list of upstreams to monitor (mandatory, including port numbers separated by space, ex.: 10.0.0.1:80 10.0.0.2:8080) -# env.statuses - list of http status codes to monitor (optional, default - all statuses, ex.: 200 403 404 410 500 502) -# env.percentiles - which percentiles to draw on time graphs (optional, list of percentiles separated by spaces, default - 80) +# env.upstream - list of upstreams to monitor (mandatory, including port numbers separated by +# space, e.g.: 10.0.0.1:80 10.0.0.2:8080) +# env.statuses - list of http status codes to monitor (optional, default - all statuses, +# e.g.: 200 403 404 410 500 502) +# env.percentiles - which percentiles to draw on time graphs (optional, list of percentiles +# separated by spaces, default - 80) # # ## Installation -# Copy file to directory /usr/share/munin/pligins/ and create symbolic link(s) for each log file you wish to monitor. +# Copy file to directory /usr/share/munin/pligins/ and create symbolic link(s) for each log file +# you wish to monitor. # # Specify log_format at /etc/nginx/conf.d/upstream.conf: -# log_format upstream "ua=[$upstream_addr] ut=[$upstream_response_time] us=[$upstream_status] cs=[$upstream_cache_status]" +# log_format upstream "ua=[$upstream_addr] ut=[$upstream_response_time] us=[$upstream_status] \ +# cs=[$upstream_cache_status]" # # Use it in your site configuration (/etc/nginx/sites-enabled/anything.conf): # access_log /var/log/nginx/upstream.log upstream; # -# Attention! Because munin-node does not have read permission for nginx log files we need to run it as root. +# Attention! Since the default user (nobody) does not have read permission for nginx log files we +# need to run it as root. 
# # And specify some options in /etc/munin/plugin-conf.d/munin-node: # @@ -37,58 +42,60 @@ # env.statuses 200 403 404 410 500 502 # env.percentiles 50 80 # -#%# family=contrib +# #%# family=contrib + +import copy +import math +import os +import re +import sys +import time -import os, sys, re, copy, math -from time import time # How we've been called progName = sys.argv[0] -progName = progName[progName.rfind("/")+1:] +progName = progName[progName.rfind("/") + 1:] + # Where to store plugin state -if "MUNIN_PLUGSTATE" in os.environ: - stateDir = os.environ["MUNIN_PLUGSTATE"] -else: - stateDir = None +stateDir = os.environ.get("MUNIN_PLUGSTATE", None) # Which site configuration we should use siteName = progName[len("nginx_upstream_multi_"):] # Log path -if "log" in os.environ: - logPath = os.environ["log"] -else: - logPath = "/var/log/nginx/access.log" +logPath = os.environ.get("log", "/var/log/nginx/access.log") # Http statuses list -httpStatusString = ("100:Continue;101:Switching protocols;102:Processing;200:OK;201:Created;202:Accepted;" -"203:Non-Authoritative Information;204:No content;205:Reset content;206:Partial content;207:Multi-status;" -"226:IM used;300:Multiple choices;301:Moved permanently;302:Moved temporarily;303:See other;304:Not modified;" -"305:Use proxy;307:Temporary redirect;400:Bad request;401:Unauthorized;402:Payment required;403:Forbidden;" -"404:Not found;405:Method not allowed;406:Not acceptable;407:Proxy Authentication Required;408:Request timeout;" -"409:Conflict;410:Gone;411:Length required;412:Precondition failed;413:Request entity too large;" -"414:Request URI too large;415:Usupported media type;416:Request range not satisfiable;417:Expectation failed;" -"422:Unprocessable entity;423:Locked;424:Failed dependency;425:Unordered collection;426:Upgrade required;" -"449:Retry with;456:Unrecoverable error;500:Internal server error;501:Not implemented;502:Bad gateway;" -"503:Service unavailable;504:Gateway timeout;505:HTTP version not supported;506:Variant also negotiates;" -"507:Insufficient storage;508:Loop detected;509:Bandwidth limit exceeded;510:Not extended") +httpStatusString = ( + "100:Continue;101:Switching protocols;102:Processing;200:OK;201:Created;202:Accepted;" + "203:Non-Authoritative Information;204:No content;205:Reset content;206:Partial content;" + "207:Multi-status;226:IM used;300:Multiple choices;301:Moved permanently;" + "302:Moved temporarily;303:See other;304:Not modified;305:Use proxy;307:Temporary redirect;" + "400:Bad request;401:Unauthorized;402:Payment required;403:Forbidden;404:Not found;" + "405:Method not allowed;406:Not acceptable;407:Proxy Authentication Required;" + "408:Request timeout;409:Conflict;410:Gone;411:Length required;412:Precondition failed;" + "413:Request entity too large;414:Request URI too large;415:Unsupported media type;" + "416:Request range not satisfiable;417:Expectation failed;422:Unprocessable entity;" + "423:Locked;424:Failed dependency;425:Unordered collection;426:Upgrade required;" + "449:Retry with;456:Unrecoverable error;500:Internal server error;501:Not implemented;" + "502:Bad gateway;503:Service unavailable;504:Gateway timeout;505:HTTP version not supported;" + "506:Variant also negotiates;507:Insufficient storage;508:Loop detected;" + "509:Bandwidth limit exceeded;510:Not extended") -if "statuses" in os.environ: - statuses = os.environ["statuses"].split() -else: - statuses = [] +# an empty list of wanted statuses is interpreted as: all statuses +statuses = os.environ.get("statuses", "").split() 
httpStatusList = {} for statusString in httpStatusString.split(";"): [code, title] = statusString.split(":") if len(statuses) > 0 and code in statuses or len(statuses) == 0: httpStatusList[code] = { - "title" : title, - "requests" : 0 + "title": title, + "requests": 0 } -cacheStatusList = { "MISS" : 0, "BYPASS" : 0, "EXPIRED" : 0, "UPDATING" : 0, "STALE" : 0, "HIT" : 0 } +cacheStatusList = {"MISS": 0, "BYPASS": 0, "EXPIRED": 0, "UPDATING": 0, "STALE": 0, "HIT": 0} # Parse upstreams upstreams = {} @@ -97,28 +104,22 @@ if "upstream" in os.environ: upstreamList = upstreamString.split() for upstream in upstreamList: upstreams[upstream] = { - "requests" : 0, - "time" : 0, - "times" : [], - "cache" : copy.deepcopy(cacheStatusList), - "http" : copy.deepcopy(httpStatusList) + "requests": 0, + "time": 0, + "times": [], + "cache": copy.deepcopy(cacheStatusList), + "http": copy.deepcopy(httpStatusList) } else: raise Exception("No upstreams specified") -if "percentiles" in os.environ: - percentiles = os.environ["percentiles"].split() -else: - percentiles = [80] +percentiles = os.environ.get("percentiles", "80").split() -if "graphs" in os.environ: - graphs_enabled = os.environ["graphs"].split() -else: - graphs_enabled = ["cache", "http", "time", "request"] +graphs_enabled = os.environ.get("graphs", "cache http time request").split() -now = int(time()) +now = int(time.time()) -lastBytePath = "%s/nginx_upstream_multi_%s_lastByte.txt" % (stateDir, siteName) +lastBytePath = os.path.join(stateDir, "nginx_upstream_multi_{}_lastByte.txt".format(siteName)) try: lastRun = os.path.getmtime(lastBytePath) except OSError: @@ -128,64 +129,69 @@ except OSError: def sanitize(string): return string.replace(".", "_").replace(":", "_").replace("/", "_").replace("-", "_") + if len(sys.argv) == 2 and sys.argv[1] == "config": # Parent graph declaration - print "multigraph nginx_upstream_multi_%s" % siteName.replace(".", "_") - print "graph_title Requests number" - print "graph_vlabel rps" - print "graph_category webserver" + print("multigraph nginx_upstream_multi_%s" % siteName.replace(".", "_")) + print("graph_title Requests number") + print("graph_vlabel rps") + print("graph_category webserver") for upstream in upstreams.keys(): - print "us%s_requests.label %s" % (sanitize(upstream), upstream) + print("us%s_requests.label %s" % (sanitize(upstream), upstream)) # Requests graph declaration if "request" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)) - print "graph_title Requests number - %s" % upstream - print "graph_vlabel rps" - print "graph_category webserver" - print "us%s_requests.label %s" % (sanitize(upstream), upstream) - print "" + print() + print("multigraph nginx_upstream_multi_%s.%s_requests" + % (sanitize(siteName), sanitize(upstream))) + print("graph_title Requests number - %s" % upstream) + print("graph_vlabel rps") + print("graph_category webserver") + print("us%s_requests.label %s" % (sanitize(upstream), upstream)) + print() # Times graph declaration if "time" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)) - print "graph_title Request time - %s" % upstream - print "graph_vlabel sec." 
- print "graph_category webserver" - print "us%s_times.label average" % (sanitize(upstream)) + print() + print("multigraph nginx_upstream_multi_%s.%s_times" + % (sanitize(siteName), sanitize(upstream))) + print("graph_title Request time - %s" % upstream) + print("graph_vlabel sec.") + print("graph_category webserver") + print("us%s_times.label average" % (sanitize(upstream))) for percentile in percentiles: - print "us%s_times_percentile_%s.label %s-percentile" % (sanitize(upstream), percentile, percentile) - print "" + print("us%s_times_percentile_%s.label %s-percentile" + % (sanitize(upstream), percentile, percentile)) + print() # HTTP Status codes graph declaration if "http" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)) - print "graph_title HTTP - %s" % upstream - print "graph_vlabel rps" - print "graph_category webserver" - keylist = httpStatusList.keys() - keylist.sort() - for status in keylist: - print "http%s_%s_status.label %s - %s" % (status, sanitize(upstream), status, httpStatusList[status]["title"]) - print "" + print() + print("multigraph nginx_upstream_multi_%s.%s_statuses" + % (sanitize(siteName), sanitize(upstream))) + print("graph_title HTTP - %s" % upstream) + print("graph_vlabel rps") + print("graph_category webserver") + for status in sorted(httpStatusList.keys()): + print("http%s_%s_status.label %s - %s" + % (status, sanitize(upstream), status, httpStatusList[status]["title"])) + print() # Cache status graph declaration if "cache" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)) - print "graph_title Cache - %s" % upstream - print "graph_vlabel rps" - print "graph_category webserver" + print() + print("multigraph nginx_upstream_multi_%s.%s_cache" + % (sanitize(siteName), sanitize(upstream))) + print("graph_title Cache - %s" % upstream) + print("graph_vlabel rps") + print("graph_category webserver") for status in cacheStatusList: - print "us%s_%s_cache.label %s" % (sanitize(status), sanitize(upstream), status) - print "" + print("us%s_%s_cache.label %s" % (sanitize(status), sanitize(upstream), status)) + print() else: timeElapsed = now - lastRun @@ -197,13 +203,13 @@ else: except Exception: lastByte = 0 - if lastByteHandle != None: + if lastByteHandle is not None: lastByteHandle.close() try: logHandle = open(logPath, "r") except Exception as e: - print "Log file %s not readable: %s" % (logPath, e.strerror) + print("Log file %s not readable: %s" % (logPath, e.strerror), file=sys.stderr) sys.exit(1) try: @@ -222,59 +228,59 @@ else: if (match): # Extract data address = match.group(1) - time = match.group(2) - status = match.group(3) - cache = match.group(4) + request_time = match.group(2) + status = match.group(3) + cache = match.group(4) # Replace separators by space address = address.replace(",", " ") address = address.replace(" : ", " ") - address = re.sub("\s+", " ", address) + address = re.sub(r"\s+", " ", address) - time = time.replace(",", " ") - time = time.replace(" : ", " ") - time = re.sub("\s+", " ", time) + request_time = request_time.replace(",", " ") + request_time = request_time.replace(" : ", " ") + request_time = re.sub(r"\s+", " ", request_time) - status = status.replace(",", " ") - status = status.replace(" : ", " ") - status = re.sub("\s+", " ", status) + status = status.replace(",", " ") + status = status.replace(" : ", " ") + 
status = re.sub(r"\s+", " ", status) - cache = cache.replace(",", " ") - cache = cache.replace(" : ", " ") - cache = re.sub("\s+", " ", cache) + cache = cache.replace(",", " ") + cache = cache.replace(" : ", " ") + cache = re.sub(r"\s+", " ", cache) addresses = address.split() - times = time.split() - statuses = status.split() - caches = cache.split() + times = request_time.split() + statuses = status.split() + caches = cache.split() index = 0 for uAddress in addresses: if uAddress in upstreams.keys(): try: - uTime = float(times[index]) + uTime = float(times[index]) except ValueError: - uTime = 0 + uTime = 0 if index < len(statuses): - uStatus = statuses[index] + uStatus = statuses[index] else: uStatus = "-" if index < len(caches): - uCache = caches[index] + uCache = caches[index] else: uCache = "-" if uAddress != "-": - upstreams[uAddress]["requests"] += 1 + upstreams[uAddress]["requests"] += 1 if uTime != "-": - upstreams[uAddress]["time"] += uTime + upstreams[uAddress]["time"] += uTime upstreams[uAddress]["times"].append(uTime) if uStatus != "-" and uStatus in upstreams[uAddress]["http"].keys(): upstreams[uAddress]["http"][uStatus]["requests"] += 1 if uCache != "-": - upstreams[uAddress]["cache"][uCache] += 1 + upstreams[uAddress]["cache"][uCache] += 1 index += 1 try: @@ -282,7 +288,7 @@ else: lastByteHandle.write(str(logHandle.tell())) lastByteHandle.close() except Exception as e: - print e.strerror + print("Failed to write status file (%s): %s" % (lastBytePath, e.strerror), file=sys.stderr) sys.exit(1) logHandle.close() @@ -293,20 +299,19 @@ else: if timeElapsed > 0: value = upstreams[upstream]["requests"] / timeElapsed - print "us%s_requests.value %s" % (sanitize(upstream), value) + print("us%s_requests.value %s" % (sanitize(upstream), value)) # Requests graph data if "request" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)) - + print() + print("multigraph nginx_upstream_multi_%s.%s_requests" + % (sanitize(siteName), sanitize(upstream))) value = 0 if timeElapsed > 0: value = upstreams[upstream]["requests"] / timeElapsed - - print "us%s_requests.value %s" % (sanitize(upstream), value) - print "" + print("us%s_requests.value %s" % (sanitize(upstream), value)) + print() # Times graph data if "time" in graphs_enabled: @@ -315,47 +320,50 @@ else: if upstreams[upstream]["requests"] > 0: uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"] upstreams[upstream]["times"].sort() - print "" - print "multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)) - print "us%s_times.value %s" % (sanitize(upstream), uTime) + print() + print("multigraph nginx_upstream_multi_%s.%s_times" + % (sanitize(siteName), sanitize(upstream))) + print("us%s_times.value %s" % (sanitize(upstream), uTime)) for percentile in percentiles: percentileValue = 0 if upstreams[upstream]["requests"] > 0: uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"] percentileKey = int(percentile) * len(upstreams[upstream]["times"]) / 100 - if len(upstreams[upstream]["times"])%2 > 0: + if len(upstreams[upstream]["times"]) % 2 > 0: low = int(math.floor(percentileKey)) high = int(math.ceil(percentileKey)) - percentileValue = (upstreams[upstream]["times"][low] + upstreams[upstream]["times"][high]) / 2 + percentileValue = (upstreams[upstream]["times"][low] + + upstreams[upstream]["times"][high]) / 2 else: percentileValue = 
upstreams[upstream]["times"][int(percentileKey)] - print "us%s_times_percentile_%s.value %s" % (sanitize(upstream), percentile, percentileValue) - print "" + print("us%s_times_percentile_%s.value %s" + % (sanitize(upstream), percentile, percentileValue)) + print() # HTTP Status codes graph data if "http" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)) - keylist = httpStatusList.keys() - keylist.sort() - for status in keylist: + print() + print("multigraph nginx_upstream_multi_%s.%s_statuses" + % (sanitize(siteName), sanitize(upstream))) + for status in sorted(httpStatusList.keys()): value = 0 if timeElapsed > 0: value = upstreams[upstream]["http"][status]["requests"] / timeElapsed - print "http%s_%s_status.value %s" % (status, sanitize(upstream), value) - print "" + print("http%s_%s_status.value %s" % (status, sanitize(upstream), value)) + print() # Cache status graph data if "cache" in graphs_enabled: for upstream in upstreams.keys(): - print "" - print "multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)) + print() + print("multigraph nginx_upstream_multi_%s.%s_cache" + % (sanitize(siteName), sanitize(upstream))) for status in cacheStatusList: value = 0 if timeElapsed > 0: value = upstreams[upstream]["cache"][status] / timeElapsed - print "us%s_%s_cache.value %s" % (sanitize(status), sanitize(upstream), value) - print "" + print("us%s_%s_cache.value %s" % (sanitize(status), sanitize(upstream), value)) + print() diff --git a/plugins/nginx/nginx_vhost_traffic b/plugins/nginx/nginx_vhost_traffic index a467d575..e2f710fe 100755 --- a/plugins/nginx/nginx_vhost_traffic +++ b/plugins/nginx/nginx_vhost_traffic @@ -1,8 +1,8 @@ #!/bin/sh # # Script for monitoring nginx Virtual host output traffic -# -# Requierements: logtail awk +# +# Requirements: logtail awk # one unique access log file with $bytes_sent value for more accuracy # check http://wiki.nginx.org/NginxHttpLogModule # @@ -11,23 +11,23 @@ # # Virtual host list # env.vhosts "example.com example.net example.org" -# +# # Log path # env.logdir = /var/log/nginx -# env.flogfile = access.log +# env.flogfile = access.log # # Position of the $bytes_sent in the access.log file # env.bparam 11 # # Aggregate subdomains -# ex: example.com will match www.example.com, webmail.example.com and *example.com +# ex: example.com will match www.example.com, webmail.example.com and *example.com # BUG: will also match also www.bad-example.com -# env.aggregate true #change to false to disable aggregation +# env.aggregate true #change to false to disable aggregation # # To report bugs, improvements or get updates # see http://github.com/joanpc/nginix_vhost_traffic # -# inspired in postfix_filtered_awk +# inspired in postfix_filtered_awk # Copyright (c) 2010, Joan Perez i Cauhe LOGDIR=${logdir:-/var/log/nginx} @@ -48,8 +48,8 @@ case $1 in echo 'graph_category webserver' i=0 - for vhost in $VHOSTS - do + for vhost in $VHOSTS + do i=$(($i + 1)) echo vhost$i.label $vhost echo vhost$i.type ABSOLUTE @@ -57,7 +57,7 @@ case $1 in echo vhost$i.draw $DRAW DRAW=STACK done - + echo rest.label Rest echo rest.type ABSOLUTE echo rest.cdef rest,8,* @@ -72,16 +72,16 @@ export AGGREGATE # Awk Script $LOGTAIL ${ACCESS_LOG} -o $STATEFILE | awk ' -BEGIN { +BEGIN { split(ENVIRON["VHOSTS"], hosts) for (host in hosts) { track[hosts[host]] = host} -} +} { cn[$2]+=$ENVIRON["BPARAM"] } -END { - for (host in cn) { - if 
(match(ENVIRON["AGGREGATE"], "true")) { +END { + for (host in cn) { + if (match(ENVIRON["AGGREGATE"], "true")) { found = 0 for (vhost in track) { if (index(host, vhost)) { @@ -93,7 +93,7 @@ END { if (! found) rest+=cn[host] } else { if (host in track) { - res[host] += cn[host] + res[host] += cn[host] } else rest+=cn[host] } } diff --git a/plugins/nova/nova_floating_ips b/plugins/nova/nova_floating_ips index 6b7e95ba..ee04a083 100755 --- a/plugins/nova/nova_floating_ips +++ b/plugins/nova/nova_floating_ips @@ -72,7 +72,7 @@ if __name__ == '__main__': if sys.argv[1] == "config": print_config() elif sys.argv[1] == "autoconf": - if not successful_import: + if not successful_import: print 'no (failed import nova module)' sys.exit(0) else: diff --git a/plugins/nova/nova_instance_ b/plugins/nova/nova_instance_ index d34730b7..b5ffb60c 100755 --- a/plugins/nova/nova_instance_ +++ b/plugins/nova/nova_instance_ @@ -114,7 +114,7 @@ if __name__ == '__main__': elif argv[1] == 'suggest': print_suggest() elif argv[1] == 'autoconf': - if not successful_import: + if not successful_import: print 'no (failed import nova module)' sys.exit(0) else: diff --git a/plugins/nova/nova_instance_launched b/plugins/nova/nova_instance_launched index 2e80c2ce..f2c6fb6d 100755 --- a/plugins/nova/nova_instance_launched +++ b/plugins/nova/nova_instance_launched @@ -52,12 +52,12 @@ if __name__ == '__main__': if sys.argv[1] == "config": print_config() elif sys.argv[1]=="autoconf" : - if not successful_import: + if not successful_import: print 'no (failed import nova module)' sys.exit(0) else: print 'yes' elif successful_import: utils.default_flagfile() - flags.FLAGS(sys.argv) + flags.FLAGS(sys.argv) print_values() diff --git a/plugins/nova/nova_instance_timing b/plugins/nova/nova_instance_timing index 8e59ca0e..de53b909 100755 --- a/plugins/nova/nova_instance_timing +++ b/plugins/nova/nova_instance_timing @@ -64,7 +64,7 @@ if __name__ == '__main__': if sys.argv[1] == "config": print_config() elif sys.argv[1] == "autoconf": - if not successful_import: + if not successful_import: print 'no (failed import nova module)' sys.exit(0) else: diff --git a/plugins/nova/nova_services b/plugins/nova/nova_services index d12823ea..47baa0b1 100755 --- a/plugins/nova/nova_services +++ b/plugins/nova/nova_services @@ -76,7 +76,7 @@ if __name__ == '__main__': if sys.argv[1] == "config": print_config() elif sys.argv[1] == "autoconf": - if not successful_import: + if not successful_import: print 'no (failed import nova module]' else: print 'yes' diff --git a/plugins/nscd/nscd_ b/plugins/nscd/nscd_ index a6e8877b..00bde9f8 100755 --- a/plugins/nscd/nscd_ +++ b/plugins/nscd/nscd_ @@ -33,19 +33,23 @@ #%# family=auto #%# capabilities=autoconf suggest -source $MUNIN_LIBDIR/plugins/plugin.sh +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" NSCD_CFG=${nscd_cfg:-/etc/nscd.conf} AUTOCONF_CHECK=$(nscd -g | grep -ic 'yes.*cache is enabled') -SUGGEST_CHECK=$(nscd -g | grep -iB2 'yes.*cache is enabled' | awk {'print $1'} | head -1) -MODE=$(basename $0 | sed 's/^nscd_//g' | tr '_' '.') +SUGGEST_CHECK=$(nscd -g | grep -iB2 'yes.*cache is enabled' | awk '{print $1}' | head -1) +MODE=$(basename "$0" | sed 's/^nscd_//g' | tr '_' '.') case $1 in autoconf) - [ -r "$NSCD_CFG" ] && [ $AUTOCONF_CHECK -gt 0 ] && echo yes || echo "no (nscd config not found or no database enabled)" + if [ -r "$NSCD_CFG" ] && [ "$AUTOCONF_CHECK" -gt 0 ]; then + echo yes + else + echo "no (nscd config not found or no database enabled)" + fi exit 0 ;; suggest) - echo $SUGGEST_CHECK + echo "$SUGGEST_CHECK" exit 0 ;; config) @@ -66,7 +70,7 @@ CONFIG ;; fetch|*) nscd -g | awk "/^$MODE cache/ {printline = 1; print; next} /^.*cache:/ {printline = 0} printline" | \ - egrep '(suggested size|cache hit rate|current number of cached values|maximum number of cached values)' | \ - sed 's/%//' | awk {' if (NR==1) print "suggestedsize.value " $1; if (NR==2) print "cachehitrate.value " $1; if (NR==3) print "currnumber.value " $1; if (NR==4) print "maxnumber.value " $1'} + grep -E '(suggested size|cache hit rate|current number of cached values|maximum number of cached values)' | \ + sed 's/%//' | awk '{ if (NR==1) print "suggestedsize.value " $1; if (NR==2) print "cachehitrate.value " $1; if (NR==3) print "currnumber.value " $1; if (NR==4) print "maxnumber.value " $1}' ;; esac diff --git a/plugins/ntp/ntp_drift b/plugins/ntp/ntp_drift new file mode 100755 index 00000000..a19f4c83 --- /dev/null +++ b/plugins/ntp/ntp_drift @@ -0,0 +1,85 @@ +#! /bin/sh +# -*- sh -*- + +: <<=cut + +=head1 NAME + +ntp_drift - Munin plugin to monitor the NTP drift value. + +=head1 APPLICABLE SYSTEMS + +Any ntpd host. + +=head1 CONFIGURATION + +The following configuration parameters are used by this plugin: + + [ntp_drift] + env.driftfile - Path to driftfile. + +=head2 DEFAULT CONFIGURATION + + [ntp_drift] + env.driftfile "/var/lib/ntp/ntp.drift" + +=head1 USAGE + +Link this plugin to /etc/munin/plugins/ and restart the munin-node. + +=head1 AUTHOR + +HORINOUCHI Masato 2019-07-16 + +=head1 LICENSE + +Same as munin. + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf + +=cut + +driftfile=${driftfile:-'/var/lib/ntp/ntp.drift'} + +do_autoconf () { + if [ -r "$driftfile" ]; then + echo "yes" + else + echo "no (could not read driftfile '$driftfile'.)" + fi +} + + +do_config () { + cat <<'EOM' +graph_title NTP drift +graph_args --base 1000 +graph_vlabel Parts Per Million +graph_category time +drift.label Frequency Offset +graph_info The frequency of the local clock oscillator. A single floating point number, which records the frequency offset measured in parts-per-million (PPM). 
+EOM +} + + +do_ () { + if [ -r "$driftfile" ]; then + echo "drift.value $(cat "$driftfile")" + else + echo "drift.value U" + fi +} + + +case $1 in + autoconf|config|'') + do_"$1" + ;; + *) + echo "Don't know how to do that" >&2 + exit 1 + ;; +esac diff --git a/plugins/ntp/ntp_kernel_pll_prec b/plugins/ntp/ntp_kernel_pll_prec index 52fbd984..26fe2139 100755 --- a/plugins/ntp/ntp_kernel_pll_prec +++ b/plugins/ntp/ntp_kernel_pll_prec @@ -37,7 +37,7 @@ EOF if [ "$1" = "autoconf" ]; then { ntpq -c kerninfo; ntpdc -c kerninfo; } 2>/dev/null | awk 'BEGIN { ev=1; } - /^precision:/ { ev=0; } + /^precision:/ { ev=0; } END { if (ev == 0) { print "yes";} else { print "no"; } exit ev; }' exit 0 fi diff --git a/plugins/ntp/ntp_kernel_pll_tol b/plugins/ntp/ntp_kernel_pll_tol index 481c8a48..e1d48e6c 100755 --- a/plugins/ntp/ntp_kernel_pll_tol +++ b/plugins/ntp/ntp_kernel_pll_tol @@ -35,9 +35,9 @@ None known EOF if [ "$1" = "autoconf" ]; then - { ntpq -c kerninfo; ntpdc -c kerninfo; } 2>/dev/null | + { ntpq -c kerninfo; ntpdc -c kerninfo; } 2>/dev/null | awk 'BEGIN { ev=1; } - /^frequency tolerance:/ { ev=0; } + /^frequency tolerance:/ { ev=0; } END { if (ev == 0) { print "yes";} else { print "no"; } exit ev; }' exit 0 fi diff --git a/plugins/ntp/ntp_packets b/plugins/ntp/ntp_packets index 987ed1fd..bf93d3e9 100755 --- a/plugins/ntp/ntp_packets +++ b/plugins/ntp/ntp_packets @@ -34,7 +34,8 @@ import sys if len(sys.argv) == 2 and sys.argv[1] == 'config': print('graph_title NTP traffic') print('graph_vlabel Packets/${graph_period} received(-)/sent(+)') - print('graph_info This graph shows the packet rates of this ntpd. Bad means packets received with bad length or format. Authfailed means packets for which authentication failed.') + print('graph_info This graph shows the packet rates of this ntpd. Bad means packets received ' + 'with bad length or format. Authfailed means packets for which authentication failed.') print('graph_category time') print('received.label Received') print('received.type DERIVE') @@ -72,7 +73,8 @@ os.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:' + os.environ['PATH'] # Assuming that the ntpd version is the same as the ntpq or ntpdc # version. This is how a proper install should be. 
-version = subprocess.check_output(['ntpq', '-c', 'version'], universal_newlines=True).split()[1][0:5].replace('.', '') +version = subprocess.check_output(['ntpq', '-c', 'version'], + universal_newlines=True).split()[1][0:5].replace('.', '') if int(version) >= 427: cmd = 'ntpq' @@ -81,9 +83,14 @@ else: stats = dict() -stats_output = subprocess.check_output([cmd, '-c', 'iostats', '-c', 'sysstats'], universal_newlines=True).splitlines() +stats_output = subprocess.check_output([cmd, '-c', 'iostats', '-c', 'sysstats'], + universal_newlines=True).splitlines() -for line in stats_output: stats[line.split(':')[0]] = int(line.split(':')[1]) +# Split the cmd output into key/value pairs +# Lines that can't be split into 2 individual elements by delimiter ':' will be skipped +for line in stats_output: + if len(line.split(':')) == 2: + stats[line.split(':')[0]] = int(line.split(':')[1]) print('received.value ' + str(stats['received packets'])) print('sent.value ' + str(stats['packets sent'])) diff --git a/plugins/ntp/ntp_peers b/plugins/ntp/ntp_peers index 174a0c26..9f1a4ab0 100755 --- a/plugins/ntp/ntp_peers +++ b/plugins/ntp/ntp_peers @@ -39,7 +39,7 @@ # # Change log # v1.0.0 2008-07-21 Chris Hastie -# initial release +# initial release # # v1.1.0 2010-12-07 Uffe Norberg # - Changed default statedir to /var/lib/munin/plugin-state (Debian default) @@ -69,15 +69,13 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if ($? eq "0") { if (`$NTPQ -np | wc -l` > 0) { print "yes\n"; - exit 0; } else { print "no (unable to list peers)\n"; - exit 1; } } else { print "no (ntpq not found)\n"; - exit 1; } + exit 0; } my %peers; @@ -107,7 +105,7 @@ open(SERVICE, "$COMMAND |") while () { if (/^[-+*#](\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(\s+\S+){7}\s+(\S+)/) { my $name = &lookupname($1); - $peers{$1}{'value'} = $3; + $peers{$1}{'value'} = $3; } } close(SERVICE); @@ -140,7 +138,7 @@ foreach my $peer (keys %peers) { # save list of peer IPs and hostnames if(-l $statefile) { die("$statefile is a symbolic link, refusing to touch it."); -} +} open (OUT, ">$statefile") or exit 4; foreach my $i (keys %peers) { print OUT "$i:" . $peers{$i}{'name'} . "\n"; @@ -155,12 +153,12 @@ sub by_name { # create a valid munin field name from the hostname sub sanitize_field () { my $field = shift; - + # replace illegal characters with an underscore $field =~ s/[^A-Za-z0-9_]/_/g; # prepend an underscore if name starts with a number $field =~ s/^([^A-Za-z_])/_$1/; - + # truncate to 19 characters if (length($field) > 19) { $field = substr($field, 0, 19); @@ -181,11 +179,11 @@ sub lookupname () { my $ip = shift; # have we already got it? if ($peers{$ip}{'name'}) { - return $peers{$ip}{'name'}; + return $peers{$ip}{'name'}; } # else look it up - my $iaddr = inet_aton($ip); - my $name = gethostbyaddr($iaddr, AF_INET) || $ip; + my $iaddr = inet_aton($ip); + my $name = gethostbyaddr($iaddr, AF_INET) || $ip; # add to cache $peers{$ip}{'name'} = $name; return $name; diff --git a/plugins/ntp/ntp_peers_ipv6 b/plugins/ntp/ntp_peers_ipv6 index d98701e0..584d45b3 100755 --- a/plugins/ntp/ntp_peers_ipv6 +++ b/plugins/ntp/ntp_peers_ipv6 @@ -29,7 +29,7 @@ # # Change log # v1.0.0 2008-07-21 Chris Hastie -# initial release +# initial release # v1.0.1 2009-06-05 Tony Hoyle # ipv6 support. Remove dns lookups. # @@ -55,15 +55,13 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if ($? 
eq "0") { if (`$NTPQ -np | wc -l` > 0) { print "yes\n"; - exit 0; } else { print "no (unable to list peers)\n"; - exit 1; } } else { print "no (ntpq not found)\n"; - exit 1; } + exit 0; } my %peers; @@ -75,7 +73,7 @@ open(SERVICE, "$COMMAND |") while () { if(/^\s*\d+\s+(\d+)\s+/) { my ($name, $offset) = &lookupip($1); - $peers{$name} = $offset; + $peers{$name} = $offset; } } close(SERVICE); @@ -106,12 +104,12 @@ foreach my $peer (keys %peers) { # create a valid munin field name from the hostname sub sanitize_field () { my $field = shift; - + # replace illegal characters with an underscore $field =~ s/[^A-Za-z0-9_]/_/g; # prepend an underscore if name starts with a number $field =~ s/^([^A-Za-z_])/_$1/; - + # truncate to 19 characters if (length($field) > 19) { $field = substr($field, 0, 19); diff --git a/plugins/ntp/ntp_pool_score_ b/plugins/ntp/ntp_pool_score_ index 1e5ce7d4..44c2633c 100755 --- a/plugins/ntp/ntp_pool_score_ +++ b/plugins/ntp/ntp_pool_score_ @@ -5,7 +5,7 @@ =head1 NAME -ntp_pool_score_ - Wildcard plugin to monitor the score assigned to a server +ntp_pool_score_ - Wildcard plugin to monitor the score assigned to a server from pool.ntp.org . This is achieved by fetching the cvs data from http://www.pool.ntp.org/scores/IP_ADDRESS/log?limit=1 using wget. @@ -38,7 +38,7 @@ tocho AT tochev DOT net =head1 LICENSE GNU General Public License, version 2 - http://www.gnu.org/licenses/gpl-2.0.html + http://www.gnu.org/licenses/gpl-2.0.html =head1 MAGIC MARKERS diff --git a/plugins/ntp/ntp_queries b/plugins/ntp/ntp_queries index 18a25b22..f0690e39 100755 --- a/plugins/ntp/ntp_queries +++ b/plugins/ntp/ntp_queries @@ -27,7 +27,7 @@ # # Change log # v1.0.0 2009-03-11 Chris Hastie -# initial release +# initial release # # # @@ -51,15 +51,13 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if ($? 
eq "0") { if (`$NTPDC -c sysstats | wc -l` > 0) { print "yes\n"; - exit 0; } else { print "no (unable to list system stats)\n"; - exit 1; } } else { print "no (ntpdc not found)\n"; - exit 1; } + exit 0; } my $queries = 0; @@ -86,20 +84,19 @@ while () { } if (/^bad version:\s*(\d*)/) { $queries += $1; - } + } # if (/^access denied:\s*(\d*)/) { # $queries += $1; - # } + # } # if (/^bad length or format:\s*(\d*)/) { # $queries += $1; - # } + # } # if (/^bad authentication:\s*(\d*)/) { # $queries += $1; - # } + # } # if (/^rate exceeded:\s*(\d*)/) { # $queries += $1; - # } - + # } } close(SERVICE); diff --git a/plugins/ntp/ntpdate_ b/plugins/ntp/ntpdate_ index bb62d5bd..b5f4ae29 100755 --- a/plugins/ntp/ntpdate_ +++ b/plugins/ntp/ntpdate_ @@ -6,7 +6,7 @@ # # config (required) # -# Author: Rune Nordbe Skillingstad +# Author: Rune Nordbøe Skillingstad # # Magic markers - optional - used by installation scripts and # munin-node-configure: diff --git a/plugins/omreport/omreport_fan_speed b/plugins/omreport/omreport_fan_speed index c8694f11..2401897d 100755 --- a/plugins/omreport/omreport_fan_speed +++ b/plugins/omreport/omreport_fan_speed @@ -92,7 +92,7 @@ else { print "graph_title OpenManage - Fan Probes\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel Speed in RPMs\n"; - print "graph_category Sensors\n"; + print "graph_category sensors\n"; foreach my $j (sort keys %val) { print "fan_$j.label $val{$j}{\"Probe Name\"}\n"; if ($val{$j}{"Warning Threshold"} !~ m/\[N\/A\]/i) { diff --git a/plugins/omreport/omreport_pwrmon_current b/plugins/omreport/omreport_pwrmon_current index 0734764c..af0eb858 100755 --- a/plugins/omreport/omreport_pwrmon_current +++ b/plugins/omreport/omreport_pwrmon_current @@ -2,7 +2,7 @@ # # Copyright (C) 2009 Andrew Chadwick, University of Oxford # Based on work by Rackspace US, Inc. , (C) 2008. -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, @@ -91,7 +91,7 @@ else { print "graph_title OpenManage - Power Monitoring - Current\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel Amps\n"; - print "graph_category Sensors\n"; + print "graph_category sensors\n"; foreach my $j (sort keys %val) { print "$j.label $val{$j}{label}\n"; } diff --git a/plugins/omreport/omreport_pwrmon_power b/plugins/omreport/omreport_pwrmon_power index 0d223ca2..a58ec270 100755 --- a/plugins/omreport/omreport_pwrmon_power +++ b/plugins/omreport/omreport_pwrmon_power @@ -2,7 +2,7 @@ # # Copyright (C) 2009 Andrew Chadwick, University of Oxford # Based on work by Rackspace US, Inc. , (C) 2008. -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, @@ -101,7 +101,7 @@ else { print "graph_title OpenManage - Power Monitoring - Power\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel Watts\n"; - print "graph_category Sensors\n"; + print "graph_category sensors\n"; foreach my $j (sort keys %val) { print "$j.label $val{$j}{label}\n"; if ($val{$j}{warning}) { diff --git a/plugins/omreport/omreport_storage_temp b/plugins/omreport/omreport_storage_temp index b821289d..ca67cc71 100755 --- a/plugins/omreport/omreport_storage_temp +++ b/plugins/omreport/omreport_storage_temp @@ -2,7 +2,7 @@ # # Copyright (C) 2009 Andrew Chadwick, University of Oxford # Based on work by Rackspace US, Inc. 
, (C) 2008. -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, @@ -162,7 +162,7 @@ else { print "graph_title OpenManage - Storage - Temperatures\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel Temp in Degrees Celsius\n"; - print "graph_category Sensors\n"; + print "graph_category sensors\n"; print "graph_info Temperature sensors within storage enclosures on the system, typically external ones managed via PERC 6/E adapters.\n"; } foreach my $c (sort keys %ctrlrs) { diff --git a/plugins/omreport/omreport_temp b/plugins/omreport/omreport_temp index ddf2c346..01eeee16 100755 --- a/plugins/omreport/omreport_temp +++ b/plugins/omreport/omreport_temp @@ -14,11 +14,11 @@ # # You should have received a copy of the GNU General Public License # along with this program; if not, see http://www.gnu.org/licenses/gpl.txt -# +# # # This plugin will graph the chassis temp sensors on a Dell PowerEdge Server # via the omreport tool. It has been tested on the following chassis: -# +# # PE2650/6650 # PE2850/6850 # PE2950 @@ -92,7 +92,7 @@ else { print "graph_title OpenManage - Temperature Probes\n"; print "graph_args --base 1000 -l 0\n"; print "graph_vlabel Temperature in Celsius\n"; - print "graph_category Sensors\n"; + print "graph_category sensors\n"; foreach my $j (sort keys %val) { print "probe_$j.label $val{$j}{\"Probe Name\"}\n"; print "probe_$j.warning $val{$j}{\"Warning Threshold\"}\n"; diff --git a/plugins/openvz/openvz-load-avg b/plugins/openvz/openvz-load-avg index 77dbcb42..c624ad85 100755 --- a/plugins/openvz/openvz-load-avg +++ b/plugins/openvz/openvz-load-avg @@ -1,12 +1,12 @@ #!/usr/bin/perl -# +# # Copyright (c) 2008, Maxime Besson # Copyright (c) 2011, David Bernard -# +# # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. -# +# # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR @@ -15,7 +15,7 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -# +# # Graph VE load averages on an OpenVZ server # # This plugin needs root privileges to call vzlist, so add this : @@ -62,7 +62,7 @@ if ( $arg eq "autoconf" ) { # Dynamic config my @result=`/usr/sbin/vzlist -H -a -s veid -o veid,name`; my $A=0; # draw an area only under the first line - + for (@result ) { ( my $veid,my $label ) = /^\s*(\d+)\s*([\w\-]+)\s*$/ ; my $name = $label; diff --git a/plugins/openvz/openvzcpu b/plugins/openvz/openvzcpu index 288f81ca..7df858cb 100755 --- a/plugins/openvz/openvzcpu +++ b/plugins/openvz/openvzcpu @@ -45,7 +45,7 @@ of idle and iowait times: env.drawidle 1 For kernels which have other than 100 jiffies per second (sic) n.b. 
this is -unlikely to be necessary - you may add the followin to the plugin-specific +unlikely to be necessary - you may add the following to the plugin-specific configuration: env.HZ 1000 diff --git a/plugins/openvz/vpsmem b/plugins/openvz/vpsmem index c9d9d789..173ff31a 100755 --- a/plugins/openvz/vpsmem +++ b/plugins/openvz/vpsmem @@ -1,8 +1,8 @@ #!/bin/bash # # Monitors memory usage in openVZ or Virtuozzo -# based on http://www.huschi.net/archiv/speicher-anzeigen-mit-vzfree.html -# Author: Michael Richter, http://osor.de/ +# based on http://www.huschi.net/archive/speicher-anzeigen-mit-vzfree.html +# Author: Michael Richter, http://osor.de/ # Cleaned up and translated to english by: Marian Sigler , 2010-08-13 # #%# capabilities=autoconf @@ -12,11 +12,10 @@ BEANCOUNTERS=/proc/user_beancounters if [ "$1" == "autoconf" ]; then if [ -e $BEANCOUNTERS ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ ! -r $BEANCOUNTERS ]; then diff --git a/plugins/openwrt/snmp__memory_openwrt b/plugins/openwrt/snmp__memory_openwrt index af496ec0..2be0186e 100755 --- a/plugins/openwrt/snmp__memory_openwrt +++ b/plugins/openwrt/snmp__memory_openwrt @@ -72,7 +72,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n" unless ($host eq 'localhost'); print <<'EOC'; graph_title Memory usage -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel kB graph_category memory graph_info This graph shows total and used memory on the host. diff --git a/plugins/oracle/oracle-pga-monitor b/plugins/oracle/oracle-pga-monitor index a0eaa1a3..90920a37 100755 --- a/plugins/oracle/oracle-pga-monitor +++ b/plugins/oracle/oracle-pga-monitor @@ -1,54 +1,56 @@ -#! /usr/bin/ruby -# -# Munin Plugin for PGA memory components monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-13 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# Prerequistes: -# 1) env.ORACLE_HOME set in munin-node -# 2) rubygems -# 3) oci8 - DBI gem for connecting to Oracle -# * instruction of installing oci8 is available here: -# http://ruby-oci8.rubyforge.org/en/InstallBinaryPackage.html -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. 
/usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/oracle__pga.rb /etc/munin/plugins/oracle__pga.rb -# ** replace with your oralce sid -# -# Parameters: -# autoconf -# config (required) -# -# Configurable variables: -# orauser : oracle user who has select privilege to query v$pgastat view -# orapass : password for the oracle user -# dbport : port used by the monitored instance (notice: numeric value) -# dbname : database to be monitored -# dbhost : host or ip address of db instance -# -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for PGA memory components monitoring + +Author: Wilfred Chau +Date: 2011-05-13 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Prerequistes: + 1) env.ORACLE_HOME set in munin-node + 2) rubygems + 3) oci8 - DBI gem for connecting to Oracle + * instruction of installing oci8 is available here: + http://ruby-oci8.rubyforge.org/en/InstallBinaryPackage.html + +Usage: + 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) + 2) chmod to allow executable to others + 3) create symbolic link in /etc/munin/plugins + ln -s /usr/share/munin/plugins/oracle__pga.rb /etc/munin/plugins/oracle__pga.rb + ** replace with your oralce sid + +Parameters: + autoconf + config (required) + +Configurable variables: + orauser : oracle user who has select privilege to query v$pgastat view + orapass : password for the oracle user + dbport : port used by the monitored instance (notice: numeric value) + dbname : database to be monitored + dbhost : host or ip address of db instance + #%# family=auto #%# capabilities=autoconf +=end + require 'rubygems' require 'oci8' @@ -58,65 +60,64 @@ dbport = 1522 dbname = 'orcl' dbhost = 'localhost' -tnsname = "(DESCRIPTION = +tnsname = "(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = #{dbhost})(PORT = #{dbport})) (CONNECT_DATA = (SID = #{dbname})))" -def runQuery (name,query) - rows = $conn.exec(query) - puts "#{name}.value #{rows.fetch().to_s}" - rows.close +def runQuery(name, query) + rows = $conn.exec(query) + puts "#{name}.value #{rows.fetch}" + rows.close end - # # Queries # -pga_target_query = "SELECT TO_CHAR(ROUND(decode(unit,'bytes',(value)/(1024*1024),value),2)) pga_target +pga_target_query = "SELECT TO_CHAR(ROUND(decode(unit,'bytes',(value)/(1024*1024),value),2)) pga_target from V$PGASTAT where name = 'aggregate PGA target parameter'" pga_query = "SELECT TO_CHAR(ROUND(decode(unit,'bytes',(value)/(1024*1024),value),2)) pga from V$PGASTAT where name = 'total PGA inuse'" -pga_components = { "pga_target" => pga_target_query, - "pga_in_use" => pga_query - } +pga_components = { 'pga_target' => pga_target_query, + 'pga_in_use' => pga_query } # # autoconf # -if ARGV[0] == "autoconf" - if tnsname.length > 1 && orauser.length > 1 && orapass.length > 1 - puts "yes" - else - puts "no" - puts "Usage: #{__FILE__} autoconf|conf" - end 
- exit 0 +case ARGV[0] +when 'autoconf' + if tnsname.length > 1 && orauser.length > 1 && orapass.length > 1 + puts 'yes' + else + puts 'no' + puts "Usage: #{__FILE__} autoconf|conf" + end + exit 0 # # config definition # -elsif ARGV[0] == "config" - puts "graph_args --base 1024k -r --lower-limit 0" - puts "graph_title Oracle PGA from #{dbname}" - puts "graph_category db" - puts "graph_info This graph shows the PGA memory usage (in MB)" - puts "graph_vlabel MB" - puts "graph_scale no" - puts "graph_period second" +when 'config' + puts 'graph_args --base 1024k -r --lower-limit 0' + puts "graph_title Oracle PGA from #{dbname}" + puts 'graph_category db' + puts 'graph_info This graph shows the PGA memory usage (in MB)' + puts 'graph_vlabel MB' + puts 'graph_scale no' + puts 'graph_period second' - pga_components.keys.each do |p| - puts "#{p}.label #{p}" - puts "#{p}.info PGA: #{p}" - puts "#{p}.type GAUGE" - puts "#{p}.draw LINE1" - end + pga_components.keys.each do |p| + puts "#{p}.label #{p}" + puts "#{p}.info PGA: #{p}" + puts "#{p}.type GAUGE" + puts "#{p}.draw LINE1" + end - exit 0 + exit 0 end $conn = OCI8.new(orauser, orapass, tnsname) pga_components.each do |pc, query| - runQuery(pc, query) + runQuery(pc, query) end $conn.logoff diff --git a/plugins/oracle/oracle-sga b/plugins/oracle/oracle-sga index d167296d..3fc21f03 100755 --- a/plugins/oracle/oracle-sga +++ b/plugins/oracle/oracle-sga @@ -1,53 +1,55 @@ -#! /usr/bin/ruby -# -# Munin Plugin for SGA memory components monitoring -# -# Author: Wilfred Chau -# Date: 2011-05-12 -# Version: 1.0 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# Prerequistes: -# 1) env.ORACLE_HOME set in munin-node -# 2) rubygems -# 3) oci8 - DBI gem for connecting to Oracle -# * instruction of installing oci8 is available here: -# http://ruby-oci8.rubyforge.org/en/InstallBinaryPackage.html -# -# Usage: -# 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins) -# 2) chmod to allow executable to others -# 3) create symbolic link in /etc/munin/plugins -# ln -s /usr/share/munin/plugins/oracle_orcl_sga.rb /etc/munin/plugins/oracle_orcl_sga.rb -# -# Parameters: -# autoconf -# config (required) -# -# Configurable variables: -# orauser : oracle user who has select privilege to query v$sgastat view -# orapass : password for the oracle user -# dbport : port used by the monitored instance (notice: numeric value) -# dbname : database to be monitored -# dbhost : host or ip address of db instance -# -# +#!/usr/bin/env ruby + +=begin + +Munin Plugin for SGA memory components monitoring + +Author: Wilfred Chau +Date: 2011-05-12 +Version: 1.0 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. 
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Prerequisites:
+ 1) env.ORACLE_HOME set in munin-node
+ 2) rubygems
+ 3) oci8 - DBI gem for connecting to Oracle
+ * instructions for installing oci8 are available here:
+ http://ruby-oci8.rubyforge.org/en/InstallBinaryPackage.html
+
+Usage:
+ 1) copy this script to the munin install plugins directory (e.g. /usr/share/munin/plugins)
+ 2) chmod to allow executable to others
+ 3) create symbolic link in /etc/munin/plugins
+ ln -s /usr/share/munin/plugins/oracle_orcl_sga.rb /etc/munin/plugins/oracle_orcl_sga.rb
+
+Parameters:
+ autoconf
+ config (required)
+
+Configurable variables:
+ orauser : oracle user who has select privilege to query v$sgastat view
+ orapass : password for the oracle user
+ dbport : port used by the monitored instance (notice: numeric value)
+ dbname : database to be monitored
+ dbhost : host or ip address of db instance
+
 #%# family=auto
 #%# capabilities=autoconf
+=end
+
 require 'rubygems'
 require 'oci8'
@@ -57,21 +59,20 @@ dbport = 1522
 dbname = 'orcl'
 dbhost = 'localhost'
-tnsname = "(DESCRIPTION =
+tnsname = "(DESCRIPTION =
 (ADDRESS = (PROTOCOL = TCP)(HOST = #{dbhost})(PORT = #{dbport}))
 (CONNECT_DATA = (SID = #{dbname})))"
-def runQuery (name,query)
- rows = $conn.exec(query)
- puts "#{name}.value #{rows.fetch().to_s}"
- rows.close
+def runQuery(name, query)
+ rows = $conn.exec(query)
+ puts "#{name}.value #{rows.fetch}"
+ rows.close
 end
-
 #
 # Queries
 #
-shared_pool_query = "SELECT TO_CHAR(ROUND(SUM(decode(pool, 'shared pool',
+shared_pool_query = "SELECT TO_CHAR(ROUND(SUM(decode(pool, 'shared pool',
 decode(name, 'library cache',0,
 'dictionary chace',0,
 'free memory',0,
@@ -96,58 +97,57 @@ large_pool_query = "SELECT TO_CHAR(ROUND(SUM(decode(pool, 'large pool', (bytes)/
 log_buffer_query = "SELECT TO_CHAR(ROUND(SUM(decode(pool, NULL, decode(name, 'log_buffer', (bytes)/(1024*1024),0),0)),2)) sga_lbuffer
- from V$SGASTAT"
+ from V$SGASTAT"
-
-memory_components = { "fixed_area" => fixed_area_query,
- "buffer_cache" => buffer_cache_query,
- "java_pool" => java_pool_query,
- "large_pool" => large_pool_query,
- "log_buffer" => log_buffer_query,
- "shared_pool" => shared_pool_query
- }
+memory_components = { 'fixed_area' => fixed_area_query,
+ 'buffer_cache' => buffer_cache_query,
+ 'java_pool' => java_pool_query,
+ 'large_pool' => large_pool_query,
+ 'log_buffer' => log_buffer_query,
+ 'shared_pool' => shared_pool_query }
 #
 # autoconf
 #
-if ARGV[0] == "autoconf"
- if tnsname.length > 1 && orauser.length > 1 && orapass.length > 1
- puts "yes"
- else
- puts "no"
- puts "Usage: #{__FILE__} autoconf|conf"
- end
- exit 0
+case ARGV[0]
+when 'autoconf'
+ if tnsname.length > 1 && orauser.length > 1 && orapass.length > 1
+ puts 'yes'
+ else
+ puts 'no'
+ puts "Usage: #{__FILE__} autoconf|conf"
+ end
+ exit 0
 #
 # config definition
 #
-elsif ARGV[0] == "config"
- puts "graph_args --base 1024k -r --lower-limit 0"
- puts "graph_title Oracle SGA from #{dbname}"
- puts "graph_category db"
- puts "graph_info This graph shows the SGA memory usage (in MB)"
- puts "graph_vlabel MB"
- puts "graph_scale no"
- puts "graph_period second"
+when 'config'
+
puts 'graph_args --base 1024k -r --lower-limit 0' + puts "graph_title Oracle SGA from #{dbname}" + puts 'graph_category db' + puts 'graph_info This graph shows the SGA memory usage (in MB)' + puts 'graph_vlabel MB' + puts 'graph_scale no' + puts 'graph_period second' - memory_components.keys.each do |m| - puts "#{m}.label #{m}" - puts "#{m}.info SGA: #{m}" - puts "#{m}.type GAUGE" + memory_components.keys.each do |m| + puts "#{m}.label #{m}" + puts "#{m}.info SGA: #{m}" + puts "#{m}.type GAUGE" - # make sure fixed_area is at the bottom of the stack - if ( m == 'fixed_area' ) - puts "#{m}.draw AREA" - else - puts "#{m}.draw STACK" - end - end + # make sure fixed_area is at the bottom of the stack + if m == 'fixed_area' + puts "#{m}.draw AREA" + else + puts "#{m}.draw STACK" + end + end - exit 0 + exit 0 end $conn = OCI8.new(orauser, orapass, tnsname) memory_components.each do |mc, query| - runQuery(mc, query) + runQuery(mc, query) end $conn.logoff diff --git a/plugins/oracle/oracle__connections b/plugins/oracle/oracle__connections index 5119e840..1f7a2b21 100755 --- a/plugins/oracle/oracle__connections +++ b/plugins/oracle/oracle__connections @@ -1,7 +1,7 @@ #!/usr/bin/perl -w # Plugin for monitor oracle connections. # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -30,7 +30,7 @@ # 'oracle'. Anyway, Munin must be told which user # this plugin should be run as. # dbpass - The corresponding password, if -# applicable. Default to undef. +# applicable. Default to undef. # # showusers - If set to 1 show usernames and num. of connections. # Default is not show users (0). @@ -66,14 +66,14 @@ if (exists $ARGV[0]) { # Check for DBD::Oracle if (! eval "require DBD::Oracle;") { print "no (DBD::Oracle not found)"; - exit 1; + exit 0; } if ($dbh) { print "yes\n"; exit 0; } else { print "no Unable to access Database $dbname on host $dbhost as user $dbuser.\nError returned was: ". $DBI::errstr; - exit 1; + exit 0; } } @@ -137,9 +137,9 @@ if ( $showusers ) { $sth->execute(); my $total = 0; while ( my ($datname,$curr_conn) = $sth->fetchrow_array ) { - print "$datname.value $curr_conn\n"; + print "$datname.value $curr_conn\n"; $total = $total+$curr_conn; - } + } print "total.value $total\n"; } else { my $sql = "select count(username) from v\$session where username is not null"; diff --git a/plugins/oracle/oracle__database_hitratio b/plugins/oracle/oracle__database_hitratio index e07dba87..75a363d6 100755 --- a/plugins/oracle/oracle__database_hitratio +++ b/plugins/oracle/oracle__database_hitratio @@ -1,7 +1,7 @@ #!/usr/bin/perl -w # Plugin for monitor oracle database reads. # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -63,14 +63,14 @@ if (exists $ARGV[0]) { # Check for DBD::Oracle if (! eval "require DBD::Oracle;") { print "no (DBD::Oracle not found)"; - exit 1; + exit 0; } if ($dbh) { print "yes\n"; exit 0; } else { print "no Unable to access Database $dbname on host $dbhost as user $dbuser.\nError returned was: ". 
$DBI::errstr; - exit 1; + exit 0; } } @@ -100,7 +100,7 @@ $sth_curr->execute(); my ($read_hitratio) = $sth_curr->fetchrow(); print "read_hitratio.value $read_hitratio\n"; -#libray hit ratio +#library hit ratio $sql_curr = "select sum(lc.pins)/(sum(lc.pins)+sum(lc.reloads))*100 \ from v\$librarycache lc"; diff --git a/plugins/oracle/oracle__locks b/plugins/oracle/oracle__locks index d8140fbc..5e4a5969 100755 --- a/plugins/oracle/oracle__locks +++ b/plugins/oracle/oracle__locks @@ -1,7 +1,7 @@ #!/usr/bin/perl -w # Plugin for monitor oracle locks # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -64,14 +64,14 @@ if (exists $ARGV[0]) { # Check for DBD::Oracle if (! eval "require DBD::Oracle;") { print "no (DBD::Oracle not found)"; - exit 1; + exit 0; } if ($dbh) { print "yes\n"; exit 0; } else { print "no Unable to access Database $dbname on host $dbhost as user $dbuser.\nError returned was: ". $DBI::errstr; - exit 1; + exit 0; } } diff --git a/plugins/oracle/oracle__tablespace_usage b/plugins/oracle/oracle__tablespace_usage index c35235a0..4a66712c 100755 --- a/plugins/oracle/oracle__tablespace_usage +++ b/plugins/oracle/oracle__tablespace_usage @@ -1,7 +1,7 @@ #!/usr/bin/perl -w # Plugin for monitor oracle database reads. # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -65,14 +65,14 @@ if (exists $ARGV[0]) { # Check for DBD::Oracle if (! eval "require DBD::Oracle;") { print "no (DBD::Oracle not found)"; - exit 1; + exit 0; } if ($dbh) { print "yes\n"; exit 0; } else { print "no Unable to access Database $dbname on host $dbhost as user $dbuser.\nError returned was: ". $DBI::errstr; - exit 1; + exit 0; } } diff --git a/plugins/oracle/oracle_connections b/plugins/oracle/oracle_connections index a05f5d8c..65996ca1 100755 --- a/plugins/oracle/oracle_connections +++ b/plugins/oracle/oracle_connections @@ -6,7 +6,7 @@ # Author: Kevin Kunkel (kunkel.kevin@gmail.com) on December 11, 2007 # (Based off the perl munin plugin by Joan Carles Soler) # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -25,7 +25,7 @@ # 'oracle'. Anyway, Munin must be told which user # this plugin should be run as. # oracle_pass - The corresponding password, if -# applicable. Default to undef. +# applicable. Default to undef. # # SHOW_ORACLE_USERS - If set to 1 show usernames and num. of connections. # Default is not show users (0). 
@@ -119,21 +119,21 @@ EOF` set pagesize 0 select username, count(username) from v\$session where username is not null group by username; EOF - echo $WARN_CRIT| awk '{ print "total.warning " $1 "\ntotal.critical " $2 }' + echo $WARN_CRIT| awk '{ print "total.warning " $1 "\ntotal.critical " $2 }' else echo "connections.label active connections" echo "connections.info active connections" echo "connections.type GAUGE" - echo $WARN_CRIT| awk '{ print "connections.warning " $1 "\nconnections.critical " $2 }' + echo $WARN_CRIT| awk '{ print "connections.warning " $1 "\nconnections.critical " $2 }' fi fi -if [ $SHOW_ORACLE_USERS -eq 1 ] +if [ $SHOW_ORACLE_USERS -eq 1 ] then sqlplus -s ${oracle_user}/${oracle_pass}@$ORACLE_SID << EOF | \ grep -v '^$'|awk 'BEGIN { total=0 } { - print $1 ".value " $2 + print $1 ".value " $2 total=total+$2 } END { print "total.value " total }' set pagesize 0 select username, count(username) from v\$session where username is not null group by username; @@ -145,7 +145,7 @@ set pagesize 0 select count(username) from v\$session where username is not null; EOF fi - + echo "max_connections.value" "`sqlplus -s ${oracle_user}/${oracle_pass}@$ORACLE_SID << EOF | \ grep -v '^$' | awk '{print $1 }' set pagesize 0 diff --git a/plugins/oracle/oracle_sysstat b/plugins/oracle/oracle_sysstat index 82159721..d25f87e1 100755 --- a/plugins/oracle/oracle_sysstat +++ b/plugins/oracle/oracle_sysstat @@ -83,30 +83,30 @@ example: env.exclude_module asmusage tablespace default: asmusage - Module name(s) to exclude seperated by white-space. - By default, asmusage module is excluded because another privilege + Module name(s) to exclude separated by white-space. + By default, asmusage module is excluded because another privilege is necessary to connect ASM instance. env.include_module: example: env.include_module asmusage default: none - Module name(s) to include seperated by white-space. - If both include_module and exclude_module are set, exclude will be + Module name(s) to include separated by white-space. + If both include_module and exclude_module are set, exclude will be ignored. - env.plugin_name: + env.plugin_name: example: env.plugin_name oracle_sysstat_2 default: program name (usually oracle_sysstat) - Used for internal graph name. + Used for internal graph name. It will be useful to monitor multi-instance databases. - env.db_name: + env.db_name: example: env.db_name dbname default: none - Used for graph title. + Used for graph title. It will be useful to monitor multi-instance databases. =head1 NOTES @@ -133,7 +133,7 @@ . "${MUNIN_LIBDIR:-}/plugins/plugin.sh" is_multigraph "$@" -# Like perl 'use strict;' +# Like perl 'use strict;' set -o nounset # Global variables @@ -518,7 +518,7 @@ global_attrs[$key]=" graph_category db graph_args --base 1000 --lower-limit 0 --rigid graph_vlabel microseconds - graph_info Oracle Wait Events - It may look wierd that Y-axis indicates 'microseconds per second'. Although number of times of wait event looks easier to understand, in many cases the number of events does not matter, but wait time become more important to analyze bottle necks. + graph_info Oracle Wait Events - It may look weird that Y-axis indicates 'microseconds per second'. Although number of times of wait event looks easier to understand, in many cases the number of events does not matter, but wait time become more important to analyze bottle necks. 
" data_attrs[$key]="" getfield_func[$key]=getfield_eventwait @@ -661,7 +661,7 @@ sqlplus_variables=" set numwidth 30 " -# Functions +# Functions autoconf() { if which sqlplus >/dev/null ; then @@ -688,7 +688,7 @@ fetch() { do_config() { local label_max_length=45 local field type draw label - local fields= + local fields= getfield echo "multigraph ${plugin_name}_${module}" @@ -697,7 +697,7 @@ do_config() { echo "${global_attrs[$module]}" | sed -e 's/^ *//' -e '/^$/d' # print data source attributes - # split line into field,type,draw,label + # split line into field,type,draw,label while read -r field type draw label do [ -z "$field" ] && continue @@ -731,7 +731,7 @@ module_list() { echo "$include_module" else for i in $exclude_module - do + do # remove excluded modules unset -v "global_attrs[$i]" done @@ -768,8 +768,8 @@ getvalue_sysstat() { echo "${sqlplus_variables} VAR vf VARCHAR2(64) VAR vl VARCHAR2(64) - EXEC :vf := '${field}' - EXEC :vl := '${label}' + EXEC :vf := '${field}' + EXEC :vl := '${label}' SELECT :vf || '.value ' || value FROM @@ -789,8 +789,8 @@ getvalue_sgainfo() { echo "${sqlplus_variables} VAR vf VARCHAR2(64) VAR vl VARCHAR2(64) - EXEC :vf := '${field}' - EXEC :vl := '${label}' + EXEC :vf := '${field}' + EXEC :vl := '${label}' SELECT :vf || '.value ' || bytes FROM @@ -810,8 +810,8 @@ getvalue_pgastat() { echo "${sqlplus_variables} VAR vf VARCHAR2(64) VAR vl VARCHAR2(64) - EXEC :vf := '${field}' - EXEC :vl := '${label}' + EXEC :vf := '${field}' + EXEC :vl := '${label}' SELECT :vf || '.value ' || value FROM @@ -831,8 +831,8 @@ getvalue_cputime() { echo "${sqlplus_variables} VAR vf VARCHAR2(64) VAR vl VARCHAR2(64) - EXEC :vf := '${field}' - EXEC :vl := '${label}' + EXEC :vf := '${field}' + EXEC :vl := '${label}' SELECT :vf || '.value ' || ROUND( value / 1000000 ) FROM @@ -853,11 +853,11 @@ FROM WHERE name = 'physical reads cache' ; -SELECT +SELECT 'buf_logical.value ' || ( sd.value + sc.value ) -FROM - v\$sysstat sd, v\$sysstat sc -WHERE +FROM + v\$sysstat sd, v\$sysstat sc +WHERE sd.name = 'db block gets from cache' AND sc.name = 'consistent gets from cache' ; SELECT 'lib_pins.value ' || SUM(pins) FROM v\$librarycache; @@ -892,7 +892,7 @@ ${sqlplus_variables} SELECT REGEXP_REPLACE( du.username, '^[^A-Za-z_]|[^A-Za-z0-9_]', '_' ) || '.value ' || count(vs.username) -FROM +FROM ( SELECT username FROM @@ -952,7 +952,7 @@ FROM ) en LEFT JOIN v\$session se ON - en.wait_class = se.wait_class AND + en.wait_class = se.wait_class AND se.username is not null AND se.wait_time = 0 GROUP BY @@ -1010,12 +1010,12 @@ getfield_eventwait2() { data_attrs[$module]=$( ${sqlplus} "${oracle_auth}" < 1: if sys.argv[1] == 'autoconf': autoconf() @@ -91,19 +96,21 @@ if len(sys.argv) > 1: print('unknown argument "' + sys.argv[1] + '"') sys.exit(1) -pages_shared=int(open('/sys/kernel/mm/ksm/pages_shared').read()) -pages_sharing=int(open('/sys/kernel/mm/ksm/pages_sharing').read()) -pages_unshared=int(open('/sys/kernel/mm/ksm/pages_unshared').read()) -pages_volatile=int(open('/sys/kernel/mm/ksm/pages_volatile').read()) -full_scans=int(open('/sys/kernel/mm/ksm/full_scans').read()) +pages_shared = int(open('/sys/kernel/mm/ksm/pages_shared').read()) +pages_sharing = int(open('/sys/kernel/mm/ksm/pages_sharing').read()) +pages_unshared = int(open('/sys/kernel/mm/ksm/pages_unshared').read()) +pages_volatile = int(open('/sys/kernel/mm/ksm/pages_volatile').read()) +full_scans = int(open('/sys/kernel/mm/ksm/full_scans').read()) -if('ksm_pages_absolute' in sys.argv[0]): - print 'pages_shared.value %i' % 
pages_shared - print 'pages_sharing.value %i' % pages_sharing - print 'pages_unshared.value %i' % pages_unshared - print 'pages_volatile.value %i' % pages_volatile -elif('ksm_pages_relative' in sys.argv[0]): - print 'pages_sharing_shared.value %f' % (float(pages_sharing)/float(pages_shared) if pages_shared>0 else 0) - print 'pages_unshared_sharing.value %f' % (float(pages_unshared)/float(pages_sharing) if pages_sharing>0 else 0) -elif('ksm_full_scans' in sys.argv[0]): - print 'full_scans.value %i' % full_scans +if('ksm_pages_absolute' in sys.argv[0]): + print('pages_shared.value %i' % pages_shared) + print('pages_sharing.value %i' % pages_sharing) + print('pages_unshared.value %i' % pages_unshared) + print('pages_volatile.value %i' % pages_volatile) +elif('ksm_pages_relative' in sys.argv[0]): + print('pages_sharing_shared.value %f' + % (float(pages_sharing) / float(pages_shared) if pages_shared > 0 else 0)) + print('pages_unshared_sharing.value %f' + % (float(pages_unshared) / float(pages_sharing) if pages_sharing > 0 else 0)) +elif('ksm_full_scans' in sys.argv[0]): + print('full_scans.value %i' % full_scans) diff --git a/plugins/other/listeners b/plugins/other/listeners index 8f426d36..efe31fe4 100755 --- a/plugins/other/listeners +++ b/plugins/other/listeners @@ -15,7 +15,7 @@ # yyyy/mm/dd v author changelog: -# 2008/01/15 v0.02 Lothar Schmidt added alternetive retrievers, stream name substitution +# 2008/01/15 v0.02 Lothar Schmidt added alternative retrievers, stream name substitution # 2008/01/15 v0.01 Lothar Schmidt initial version, email: l.make.a.noise.here@scarydevilmonastery.net @@ -71,7 +71,7 @@ LASTSTREAM=${#STREAMS[*]} # --- check whether any stream found --- run_autoconf() { if (( $LASTSTREAM )) ; then echo yes ; exit 0 ; fi # found streams - echo U ; exit 1 ; } # no radio or streams + echo no ; exit 0 ; } # no radio or streams @@ -87,7 +87,7 @@ graph_vlabel ${GRAPH} EOF for (( I=2 ; I GPLv2 =head1 MAGIC MARKERS - + #%# family=auto #%# capabilities=autoconf suggest @@ -86,7 +86,7 @@ $graphs{conn} = { config => { args => '--lower-limit 0', vlabel => 'Connections', - category => 'opentracker', + category => 'filetransfer', title => 'Current Connections', info => 'Current Connections to OpenTracker', }, @@ -193,8 +193,8 @@ $graphs{udp4} = { =head2 Config Check This block of code looks at the argument that is possibly supplied, - should it be config, it then checks to make sure the plugin - specified exists, assuming it does, it will run the do_config + should it be config, it then checks to make sure the plugin + specified exists, assuming it does, it will run the do_config subroutine for the plugin specified, otherwise it dies complaining about an unknown plugin. @@ -228,11 +228,10 @@ if (defined $ARGV[0] && $ARGV[0] eq 'autoconf') { my $response = $ua->get($url); if ($response->is_success) { print "yes\n"; - exit 0; } else { print "no: unable to connect to url: $url\n"; - exit 1; } + exit 0; } =head2 Suggest Check @@ -313,7 +312,7 @@ sub print_output { This subroutine prints out the main config information for all of the graphs. It takes one parameters, $plugin - $plugin; graph being called up to print config for + $plugin; graph being called up to print config for Example: print_config($plugin); diff --git a/plugins/other/pacman_pending_updates b/plugins/other/pacman_pending_updates index ed3b8e26..ce5e7f27 100755 --- a/plugins/other/pacman_pending_updates +++ b/plugins/other/pacman_pending_updates @@ -14,12 +14,6 @@ All systems with pacman as their package manager. 
The plugin needs no additional configuration and works out of the box.
-It is possible to add warnings for certain numbers of updates pending. The
-following will send a warning when there are more than 10 updates pending.
-
- [pacman_pending_updates]
- env.PENDING_UPDATES_WARNING :10
-
 =head1 INTERPRETATION
 This plugin will draw one line: the number of updates pending.
@@ -31,7 +25,7 @@ This plugin will draw one line: the number of updates pending.
 =head1 VERSION
- 1.0.0
+ 1.1.1
 =head1 AUTHOR
@@ -53,17 +47,26 @@ graph_category security
 updates.label updates
 updates.info Current number of pending updates
 EOM
- if [[ -n $PENDING_UPDATES_WARNING ]]; then
- echo updates.warning $PENDING_UPDATES_WARNING
- fi
 ;;
 autoconf)
- hash checkupdates &> /dev/null && echo yes || echo "no (checkupdates not found)"
+ if hash checkupdates >/dev/null 2>&1; then
+ echo yes
+ else
+ echo "no (checkupdates not found)"
+ fi
 ;;
 *)
- echo updates.value $(checkupdates | wc -l)
+ if updates="$(checkupdates)"; then
+ if [ -n "$updates" ]; then
+ echo "updates.value $(echo "$updates" | wc -l)"
+ echo "updates.extinfo $(echo "$updates" | paste -s -d,)"
+ else
+ echo updates.value 0
+ fi
+ else
+ echo "updates.value U" && exit
+ fi
 ;;
 esac
diff --git a/plugins/other/pid b/plugins/other/pid
index 09e2c2b0..f1381a5a 100755
--- a/plugins/other/pid
+++ b/plugins/other/pid
@@ -8,7 +8,7 @@ GRAPH="pid"
 SECTION="system"
 run_autoconf() {
- echo yes
+ echo yes
 }
 run_config() {
diff --git a/plugins/other/port_ b/plugins/other/port_
index bfd9d832..81fd7f47 100755
--- a/plugins/other/port_
+++ b/plugins/other/port_
@@ -1,4 +1,4 @@
-#! /opt/csw/bin/ruby
+#!/usr/bin/env ruby
 #
 # Wildcard-script to monitor network port usage using netstat. To monitor a
 # port, link port_ to this file. E.g. This plugin shall run by root user
@@ -15,66 +15,64 @@ require 'rubygems'
 require 'munin'
-SERVICE = $0.split( '_' ).last
-SERVICE_F = '/etc/services'
-PORT = /^[\d]+(\.[\d]+){0,1}$/ === SERVICE ? SERVICE : %x[grep #{SERVICE} #{SERVICE_F}].split( "\t\t" )[1].split( '/' )[0]
+SERVICE = $0.split('_').last
+SERVICE_F = '/etc/services'.freeze
+PORT = SERVICE =~ /^\d+(\.\d+){0,1}$/ ?
SERVICE : `grep #{SERVICE} #{SERVICE_F}`.split("\t\t")[1].split('/')[0] class PortMonit < Munin::Plugin - graph_attributes "#{SERVICE} port usage, known as #{PORT}", - :category => 'network', - :info => 'This graph shows connection split by the state of the socket.', - :vlabel => 'Current connections' + category: 'network', + info: 'This graph shows connection split by the state of the socket.', + vlabel: 'Current connections' - declare_field :ESTABLISHED, - :label => 'Established', :draw => :AREA, - :type => :GAUGE, :min => 0 + declare_field :ESTABLISHED, + label: 'Established', draw: :AREA, + type: :GAUGE, min: 0 - declare_field :CLOSE_WAIT, - :label => 'Waiting close', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :CLOSE_WAIT, + label: 'Waiting close', draw: :STACK, + type: :GAUGE, min: 0 - declare_field :TIME_WAIT, - :label => 'Waiting after close', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :TIME_WAIT, + label: 'Waiting after close', draw: :STACK, + type: :GAUGE, min: 0 - declare_field :CLOSING, - :label => 'Closing', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :CLOSING, + label: 'Closing', draw: :STACK, + type: :GAUGE, min: 0 - declare_field :LAST_ACK, - :label => 'Waiting for acknowledgement', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :LAST_ACK, + label: 'Waiting for acknowledgement', draw: :STACK, + type: :GAUGE, min: 0 - declare_field :FIN_WAIT_1, - :label => 'Socket closed, connection shutting down', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :FIN_WAIT_1, + label: 'Socket closed, connection shutting down', draw: :STACK, + type: :GAUGE, min: 0 - declare_field :FIN_WAIT_2, - :label => 'Connection closed, Socket still waiting', :draw => :STACK, - :type => :GAUGE, :min => 0 + declare_field :FIN_WAIT_2, + label: 'Connection closed, Socket still waiting', draw: :STACK, + type: :GAUGE, min: 0 def retrieve_values - - @_netstat = %x[netstat -n -P tcp | egrep "\.#{PORT} "].split( "\n" ) + @_netstat = `netstat -n -P tcp | egrep "\.#{PORT} "`.split("\n") - - { :ESTABLISHED => count( @_netstat, 'ESTABLISHED' ), - :CLOSE_WAIT => count( @_netstat, 'CLOSE_WAIT' ), - :CLOSING => count( @_netstat, 'CLOSING' ), - :LAST_ACK => count( @_netstat, 'LAST_ACK' ), - :FIN_WAIT_1 => count( @_netstat, 'FIN_WAIT_1' ), - :FIN_WAIT_2 => count( @_netstat, 'FIN_WAIT_2' ), - :TIME_WAIT => count( @_netstat, 'TIME_WAIT' ) } + { ESTABLISHED: count(@_netstat, 'ESTABLISHED'), + CLOSE_WAIT: count(@_netstat, 'CLOSE_WAIT'), + CLOSING: count(@_netstat, 'CLOSING'), + LAST_ACK: count(@_netstat, 'LAST_ACK'), + FIN_WAIT_1: count(@_netstat, 'FIN_WAIT_1'), + FIN_WAIT_2: count(@_netstat, 'FIN_WAIT_2'), + TIME_WAIT: count(@_netstat, 'TIME_WAIT') } end private - def count( source, regex ) + + def count(source, regex) @_result = 0 - source.each { |obj| @_result += 1 if obj.match( regex ) } - - return @_result + source.each { |obj| @_result += 1 if obj.match(regex) } + + @_result end end diff --git a/plugins/other/proc_ b/plugins/other/proc_ index 6577bdb6..751a5699 100755 --- a/plugins/other/proc_ +++ b/plugins/other/proc_ @@ -1,4 +1,4 @@ -#!/usr/bin/perl +#!/usr/bin/perl # -*- perl -*- # # proc_ - Munin plugin to for Process information @@ -7,17 +7,17 @@ # # Author: Kristian Lyngstøl # Author: Trygve Vea -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any 
later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. @@ -39,7 +39,7 @@ The configuration section shows the defaults env.procname init env.category Process Info -env.procname defines the processname as seen inside the parenthesis of the +env.procname defines the processname as seen inside the parenthesis of the second column in /proc/pid/stat. If you don't get the data you expect, you can check if the value is what you expect here. @@ -121,15 +121,15 @@ my @field_parameters = ('graph', 'min', 'max', 'draw', 'cdef', 'warning', 'colour', 'info', 'type'); # Data structure that defines all possible graphs (aspects) and how they # are to be plotted. Every top-level entry is a graph/aspect. Each top-level graph -# MUST have title set and 'values'. -# +# MUST have title set and 'values'. +# # Graphs with 'DEBUG' set to anything is omitted from 'suggest'. -# +# # 'rpn' on values allows easy access to graphs consisting of multiple # values from procstats. (Reverse polish notation). The RPN # implementation only accepts +-*/ and procstats-values. # -# Any value left undefined will be left up to Munin to define/ignore/yell +# Any value left undefined will be left up to Munin to define/ignore/yell # about. # # See munin documentation or rrdgraph/rrdtool for more information. @@ -242,7 +242,7 @@ sub populate_stats # Bail-function. sub usage { - if (defined(@_) && "@_" ne "") { + if (@_ && "@_" ne "") { print STDERR "@_" . "\n\n"; } print STDERR "Known arguments: suggest, config, autoconf.\n"; @@ -258,7 +258,7 @@ sub autoconf exit 0; } -# Suggest relevant aspects/values of $self. +# Suggest relevant aspects/values of $self. # 'DEBUG'-graphs are excluded. sub suggest { @@ -292,7 +292,7 @@ sub print_if_exist } # Walk through the relevant aspect and print all top-level configuration -# values and value-definitions. +# values and value-definitions. sub get_config { my $graph = $_[0]; @@ -328,7 +328,7 @@ sub get_config } } -# Read and verify the aspect ($self). +# Read and verify the aspect ($self). sub set_aspect { $self = $0; diff --git a/plugins/other/qstatcod4and5_ b/plugins/other/qstatcod4and5_ deleted file mode 100755 index e60d9156..00000000 --- a/plugins/other/qstatcod4and5_ +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/sh -################################################################# -# Title : Qstat plugin for Munin # -# Author : Benjamin DUPUIS - Poil # -# Email : poil@quake.fr # -# First release : 18/10/2007 # -#---------------------------------------------------------------# -# Edited: Rouven David Nal - peperoni # -# Edit : 09/01/2009 # -# Plugin edited for COD4+COD5 # -# Email: peperoni@sac-esports.de # -#---------------------------------------------------------------# -################################################################# -# Variable : # -#---------------------------------------------------------------# -# Set path to QSTAT # -qstat_exe='/usr/local/bin/qstat' # -# Set the Group for munin to be displayed e.x. 
games or COD4 # -munin_group='games' # -#---------------------------------------------------------------# -# End of config -script_name=$(basename $0) -################################################################# - -################################################################# -# Help # -#---------------------------------------------------------------# -usage() { - echo 'For testing the script, run qstatcod4and5_ cods IP PORT' - echo ' - GameType : cods ... run qstat for seeing available gametype' - echo 'For munin you must ln -s /usr/share/munin/plugins/qstatcod4and5_ /etc/munin/plugins/cod4_cods_IP_PORT' - echo 'Example you will test this COD4 Server: 123.456.789.123:28960' - echo 'your symlink looks like this: ln -s /usr/share/munin/plugins/cod4server /etc/munin/plugins/cod4_cods_123.456.789.123_28960' - echo 'Perhaps you must have to set qstat_exe path, actually on'${qstat_exe}; - echo 'Have Fun' -} - -config() { - if [ "${script_name}" != "qstatcod4and5_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) - else - gametype=$1 - ip=$2 - port=$3 - fi - -echo "graph_title Number of players on ${gametype} - ${ip}:${port} -graph_vlabel players -graph_category ${munin_group} -player.label players" -} - -################################################################# -# Quake Stat, call qstat # -#---------------------------------------------------------------# -quake_stat() { - if [ "${script_name}" != "qstatcod4and5_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) - else - gametype=$1 - ip=$2 - port=$3 - fi - - if [ ! -z ${gametype} ] && [ ! -z ${gametype} ] && [ ! -z ${gametype} ]; then - dummy=$(${qstat_exe} -P -pa -sort P -${gametype} ${ip}:${port} | grep frags | wc -l) - playervalue=$dummy - - if [ -z "${playervalue}" ]; then - playervalue=0 - fi - - echo "player.value "${playervalue}; - else - echo "player.value U" - fi -} - -################################################################# -# Main # -#---------------------------------------------------------------# -case $1 in - config) - config - exit 0 - ;; - help | ?) - usage - exit 0 - ;; - autoconf) - echo "no (edit the script for set qstat path)" - ;; - *) - quake_stat $1 $2 $3 - exit 0 - ;; -esac - diff --git a/plugins/other/radwho_cnt b/plugins/other/radwho_cnt index c7664ab3..722398db 100755 --- a/plugins/other/radwho_cnt +++ b/plugins/other/radwho_cnt @@ -11,12 +11,11 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel users' echo 'graph_noscale true' - echo 'graph_category other' + echo 'graph_category auth' echo 'usercnt.label users' echo 'graph_info FreeRadius active users count.' echo 'usercnt.info FreeRadius active users count' exit 0 fi -echo "usercnt.value $($radwho -rci 2>/dev/null | wc -l)" - +echo "usercnt.value $("$radwho" -rci 2>/dev/null | wc -l)" diff --git a/plugins/other/s9y b/plugins/other/s9y index f66b688e..baf96889 100755 --- a/plugins/other/s9y +++ b/plugins/other/s9y @@ -1,8 +1,8 @@ #!/bin/bash -# $Id: s9y 7 2010-02-24 22:15:37Z root $ +# $Id: s9y 7 2010-02-24 22:15:37Z root $ # $Rev: 7 $ # $Author: root $ -# $Date: 2010-02-24 23:15:37 +0100 (Mi, 24. Feb 2010) $ +# $Date: 2010-02-24 23:15:37 +0100 (Mi, 24. 
Feb 2010) $ # # : < 0 && ARGV[0] == 'config' - puts "graph_title Passenger Memory Usage" - puts "graph_category webserver" - puts "graph_vlabel MB" - puts "apache_rss.label Apache Dirty RSS" - puts "passenger_rss.label Passenger Dirty RSS" + puts 'graph_title Passenger Memory Usage' + puts 'graph_category webserver' + puts 'graph_vlabel MB' + puts 'apache_rss.label Apache Dirty RSS' + puts 'passenger_rss.label Passenger Dirty RSS' exit(0) end @@ -24,10 +24,10 @@ passenger_rss = nil `#{memory_stats_command}`.each_line do |line| next unless /### Total private dirty RSS: (\d+\.\d+) MB/.match(line) + passenger_rss = $~[1] unless apache_rss.nil? apache_rss ||= $~[1] end puts "apache_rss.value #{apache_rss}" puts "passenger_rss.value #{passenger_rss}" - diff --git a/plugins/passenger/passenger_processes b/plugins/passenger/passenger_processes index d8097e96..8b4b5a4b 100755 --- a/plugins/passenger/passenger_processes +++ b/plugins/passenger/passenger_processes @@ -8,13 +8,13 @@ process_stats_command = ENV['process_stats_command'] || '/opt/ruby-enterprise-1.8.6-20080810/bin/passenger-status' if ARGV.length > 0 && ARGV[0] == 'config' - puts "graph_title Passenger Processes" - puts "graph_category webserver" - puts "graph_vlabel processes" - puts "max.label Max processes" - puts "count.label Total processes" - puts "active.label Active processes" - puts "queued.label Queued requests" + puts 'graph_title Passenger Processes' + puts 'graph_category webserver' + puts 'graph_vlabel processes' + puts 'max.label Max processes' + puts 'count.label Total processes' + puts 'active.label Active processes' + puts 'queued.label Queued requests' exit(0) end @@ -24,13 +24,14 @@ active = nil queued = nil `#{process_stats_command}`.each_line do |line| - if /max\s+=\s+(\d+)/.match(line) + case line + when /max\s+=\s+(\d+)/ max = $~[1] - elsif /count\s+=\s+(\d+)/.match(line) + when /count\s+=\s+(\d+)/ count = $~[1] - elsif /^active\s+=\s+(\d+)/.match(line) + when /^active\s+=\s+(\d+)/ active = $~[1] - elsif /Waiting on global queue\s+=\s+(\d+)/.match(line) + when /Waiting on global queue\s+=\s+(\d+)/ queued = $~[1] end end @@ -39,4 +40,3 @@ puts "max.value #{max}" puts "count.value #{count}" puts "active.value #{active}" puts "queued.value #{queued.to_i}" - diff --git a/plugins/passenger/passenger_status b/plugins/passenger/passenger_status index cd3e2a17..9ac5738a 100755 --- a/plugins/passenger/passenger_status +++ b/plugins/passenger/passenger_status @@ -1,54 +1,54 @@ #!/usr/bin/env ruby - + def output_config - puts <<-END -graph_category webserver -graph_title status -graph_vlabel count -graph_info This graph shows how much passenger process are working, available and how much queries are waiting. -max.label max processes -max.draw AREA -max.info Maximum processes allowed to run simultaneously. -sessions.label queued requests -sessions.draw LINE2 -sessions.info Requests queued, waiting to be processed. -running.label running processes -running.draw LINE1 -running.info The number of application instances that are currently alive. -active.label active processes -active.draw LINE1 -active.info The number of application instances that are currently processing requests. -waiting.label waiting requests -waiting.draw LINE2 -waiting.info Requests waiting to be queued. -END + puts <<~END + graph_category webserver + graph_title status + graph_vlabel count + graph_info This graph shows how much passenger process are working, available and how much queries are waiting. 
+ max.label max processes + max.draw AREA + max.info Maximum processes allowed to run simultaneously. + sessions.label queued requests + sessions.draw LINE2 + sessions.info Requests queued, waiting to be processed. + running.label running processes + running.draw LINE1 + running.info The number of application instances that are currently alive. + active.label active processes + active.draw LINE1 + active.info The number of application instances that are currently processing requests. + waiting.label waiting requests + waiting.draw LINE2 + waiting.info Requests waiting to be queued. + END exit 0 end - + def output_values status = `sudo passenger-status` unless $?.success? - $stderr.puts "failed executing passenger-status" + warn 'failed executing passenger-status' exit 1 end status =~ /max\s+=\s+(\d+)/ - puts "max.value #{$1}" - + puts "max.value #{Regexp.last_match(1)}" + status =~ /count\s+=\s+(\d+)/ - puts "running.value #{$1}" - + puts "running.value #{Regexp.last_match(1)}" + status =~ /active\s+=\s+(\d+)/ - puts "active.value #{$1}" - + puts "active.value #{Regexp.last_match(1)}" + status =~ /Waiting on global queue:\s+(\d+)/ - puts "waiting.value #{$1}" + puts "waiting.value #{Regexp.last_match(1)}" total_sessions = 0 status.scan(/Sessions: (\d+)/).flatten.each { |count| total_sessions += count.to_i } puts "sessions.value #{total_sessions}" end - -if ARGV[0] == "config" + +if ARGV[0] == 'config' output_config else output_values diff --git a/plugins/pdns/pdns_errors b/plugins/pdns/pdns_errors index 82b0ffcb..b971860e 100755 --- a/plugins/pdns/pdns_errors +++ b/plugins/pdns/pdns_errors @@ -9,11 +9,17 @@ #%# family=auto #%# capabilities=autoconf -command="/etc/init.d/pdns dump" +pdns_control="/usr/bin/pdns_control" +command="$pdns_control list" if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + if [ -e "$pdns_control" ]; then + echo yes + exit 0 + else + echo "no (missing $pdns_control)" + exit 0 + fi fi if [ "$1" = "config" ]; then @@ -38,4 +44,4 @@ if [ "$1" = "config" ]; then fi -$command | sed 's/=\([0-9]\+\),/.value \1\n/g' | grep corrupt'\|'servfail'\|'timedout | sed 's/-/_/g' +$command | sed 's/=\([0-9]\+\),/.value \1\n/g' | egrep "corrupt|servfail|timedout" | sed 's/-/_/g' diff --git a/plugins/pdns/pdns_latency b/plugins/pdns/pdns_latency index 5178dfd6..47753079 100755 --- a/plugins/pdns/pdns_latency +++ b/plugins/pdns/pdns_latency @@ -9,11 +9,17 @@ #%# family=auto #%# capabilities=autoconf -command="/etc/init.d/pdns show" +pdns_control="/usr/bin/pdns_control" +command="$pdns_control show" if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + if [ -e "$pdns_control" ]; then + echo yes + exit 0 + else + echo "no (missing $pdns_control)" + exit 0 + fi fi if [ "$1" = "config" ]; then @@ -31,4 +37,4 @@ fi -echo "latency.value $($command latency | awk -F= '{print $2}')" +echo "latency.value $($command latency)" diff --git a/plugins/pdns/pdns_qsize b/plugins/pdns/pdns_qsize index 5256e8e6..4d0a86c7 100755 --- a/plugins/pdns/pdns_qsize +++ b/plugins/pdns/pdns_qsize @@ -9,11 +9,17 @@ #%# family=auto #%# capabilities=autoconf -command="/etc/init.d/pdns show" +pdns_control="/usr/bin/pdns_control" +command="$pdns_control show" if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + if [ -e "$pdns_control" ]; then + echo yes + exit 0 + else + echo "no (missing $pdns_control)" + exit 0 + fi fi if [ "$1" = "config" ]; then @@ -31,5 +37,5 @@ fi -echo "qsize.value $($command qsize_q | awk -F= '{print $2}')" +echo "qsize.value $($command qsize-q)" diff --git a/plugins/pdns/pdns_queries 
b/plugins/pdns/pdns_queries index 82f85477..edad5810 100755 --- a/plugins/pdns/pdns_queries +++ b/plugins/pdns/pdns_queries @@ -9,11 +9,17 @@ #%# family=auto #%# capabilities=autoconf -command="/etc/init.d/pdns dump" +pdns_control="/usr/bin/pdns_control" +command="/usr/bin/pdns_control list" if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + if [ -e "$pdns_control" ]; then + echo yes + exit 0 + else + echo "no (missing $pdns_control)" + exit 0 + fi fi if [ "$1" = "config" ]; then @@ -50,4 +56,4 @@ if [ "$1" = "config" ]; then fi -$command | sed 's/=\([0-9]\+\),/.value \1\n/g' | grep udp-'\|'recursing'\|'tcp | sed 's/-/_/g' +$command | sed 's/=\([0-9]\+\),/.value \1\n/g' | egrep "udp-|recursing|tcp" | sed 's/-/_/g' diff --git a/plugins/pdns/pdns_rec_answers b/plugins/pdns/pdns_rec_answers index a78de5de..a7db89f6 100755 --- a/plugins/pdns/pdns_rec_answers +++ b/plugins/pdns/pdns_rec_answers @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -65,11 +67,11 @@ if [ "$1" = "config" ]; then exit 0 fi -echo a.value `rec_control get answers0-1` -echo b.value `rec_control get answers1-10` -echo c.value `rec_control get answers10-100` -echo d.value `rec_control get answers100-1000` -echo e.value `rec_control get answers-slow` -echo f.value `rec_control get outgoing-timeouts` +echo a.value "$($rec_control get answers0-1)" +echo b.value "$($rec_control get answers1-10)" +echo c.value "$($rec_control get answers10-100)" +echo d.value "$($rec_control get answers100-1000)" +echo e.value "$($rec_control get answers-slow)" +echo f.value "$($rec_control get outgoing-timeouts)" exit 0 diff --git a/plugins/pdns/pdns_rec_cache b/plugins/pdns/pdns_rec_cache index e72d1ed0..007ad4cd 100755 --- a/plugins/pdns/pdns_rec_cache +++ b/plugins/pdns/pdns_rec_cache @@ -9,17 +9,19 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi -RESENDS=`rec_control get cache-resends` +RESENDS="$($rec_control get cache-resends)" ISRESENDS="" [ "$RESENDS" != "UNKNOWN" ] && ISRESENDS="resends" @@ -53,8 +55,8 @@ if [ "$1" = "config" ]; then exit 0 fi -echo hits.value `rec_control get cache-hits` -echo misses.value `rec_control get cache-misses` -[ "$RESENDS" != "UNKNOWN" ] && echo resends.value `rec_control get cache-resends` +echo hits.value "$($rec_control get cache-hits)" +echo misses.value "$($rec_control get cache-misses)" +[ "$RESENDS" != "UNKNOWN" ] && echo resends.value "$($rec_control get cache-resends)" exit 0 diff --git a/plugins/pdns/pdns_rec_cache_size b/plugins/pdns/pdns_rec_cache_size index 6e855253..cec24d94 100755 --- a/plugins/pdns/pdns_rec_cache_size +++ b/plugins/pdns/pdns_rec_cache_size @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing 
$rec_control)" + exit 0 fi fi @@ -39,7 +41,7 @@ if [ "$1" = "config" ]; then exit 0 fi -echo entries.value `rec_control get cache-entries` -echo negative.value `rec_control get negcache-entries` +echo entries.value "$($rec_control get cache-entries)" +echo negative.value "$($rec_control get negcache-entries)" exit 0 diff --git a/plugins/pdns/pdns_rec_concurrent b/plugins/pdns/pdns_rec_concurrent index bb3ac11e..dcba6ad5 100755 --- a/plugins/pdns/pdns_rec_concurrent +++ b/plugins/pdns/pdns_rec_concurrent @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -34,6 +36,6 @@ if [ "$1" = "config" ]; then exit 0 fi -echo concurrent.value `rec_control get concurrent-queries` +echo concurrent.value "$($rec_control get concurrent-queries)" exit 0 diff --git a/plugins/pdns/pdns_rec_issues b/plugins/pdns/pdns_rec_issues index d605113f..1191bb17 100755 --- a/plugins/pdns/pdns_rec_issues +++ b/plugins/pdns/pdns_rec_issues @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -59,10 +61,10 @@ if [ "$1" = "config" ]; then exit 0 fi -echo spoofs.value `rec_control get spoof-prevents` -echo resource.value `rec_control get resource-limits` -echo client.value `rec_control get client-parse-errors` -echo server.value `rec_control get server-parse-errors` -echo overflow.value `rec_control get tcp-client-overflow` +echo spoofs.value "$($rec_control get spoof-prevents)" +echo resource.value "$($rec_control get resource-limits)" +echo client.value "$($rec_control get client-parse-errors)" +echo server.value "$($rec_control get server-parse-errors)" +echo overflow.value "$($rec_control get tcp-client-overflow)" exit 0 diff --git a/plugins/pdns/pdns_rec_outqueries b/plugins/pdns/pdns_rec_outqueries index 524d0056..000c436c 100755 --- a/plugins/pdns/pdns_rec_outqueries +++ b/plugins/pdns/pdns_rec_outqueries @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -41,7 +43,7 @@ if [ "$1" = "config" ]; then exit 0 fi -echo all.value `rec_control get all-outqueries` -echo tcp.value `rec_control get tcp-outqueries` +echo all.value "$($rec_control get all-outqueries)" +echo tcp.value "$($rec_control get tcp-outqueries)" exit 0 diff --git a/plugins/pdns/pdns_rec_qtypes b/plugins/pdns/pdns_rec_qtypes index caf35378..284d9380 100755 --- a/plugins/pdns/pdns_rec_qtypes +++ b/plugins/pdns/pdns_rec_qtypes @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo 
"no (missing $rec_control)" + exit 0 fi fi @@ -236,15 +238,15 @@ BEGIN { { if (substr($1, 0, 4) == "TYPE") { unk += ($2+0); - } else { + } else { print tolower($1)".value "$2; - } -}; + } +}; END { print "unk.value "unk; } ' -rec_control get-qtypelist | awk "$awkscript" +"$rec_control" get-qtypelist | awk "$awkscript" exit 0 diff --git a/plugins/pdns/pdns_rec_querylatency b/plugins/pdns/pdns_rec_querylatency index 20b63973..84de6803 100755 --- a/plugins/pdns/pdns_rec_querylatency +++ b/plugins/pdns/pdns_rec_querylatency @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -34,6 +36,6 @@ if [ "$1" = "config" ]; then exit 0 fi -echo latency.value `rec_control get qa-latency` +echo latency.value "$($rec_control get qa-latency)" exit 0 diff --git a/plugins/pdns/pdns_rec_questions b/plugins/pdns/pdns_rec_questions index 2a84ef63..f0cfe59c 100755 --- a/plugins/pdns/pdns_rec_questions +++ b/plugins/pdns/pdns_rec_questions @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -41,7 +43,7 @@ if [ "$1" = "config" ]; then exit 0 fi -echo all.value `rec_control get questions` -echo tcp.value `rec_control get tcp-questions` +echo all.value "$($rec_control get questions)" +echo tcp.value "$($rec_control get tcp-questions)" exit 0 diff --git a/plugins/pdns/pdns_rec_throttle b/plugins/pdns/pdns_rec_throttle index d82ffc1f..df6f0758 100755 --- a/plugins/pdns/pdns_rec_throttle +++ b/plugins/pdns/pdns_rec_throttle @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -34,6 +36,6 @@ if [ "$1" = "config" ]; then exit 0 fi -echo throttled.value `rec_control get throttled-out` +echo throttled.value "$($rec_control get throttled-out)" exit 0 diff --git a/plugins/pdns/pdns_rec_unauth b/plugins/pdns/pdns_rec_unauth index 2f3df151..92df5af1 100755 --- a/plugins/pdns/pdns_rec_unauth +++ b/plugins/pdns/pdns_rec_unauth @@ -9,13 +9,15 @@ # echo '[pdns_rec_*]' >/etc/munin/plugin-conf.d/pdns_rec # echo 'user root' >>/etc/munin/plugin-conf.d/pdns_rec +rec_control="/usr/bin/rec_control" + if [ "$1" = "autoconf" ]; then - if [ -e /usr/bin/rec_control ]; then + if [ -e "$rec_control" ]; then echo yes exit 0 else - echo no - exit 1 + echo "no (missing $rec_control)" + exit 0 fi fi @@ -47,8 +49,8 @@ if [ "$1" = "config" ]; then exit 0 fi -echo tcp.value `rec_control get unauthorized-tcp` -echo udp.value `rec_control get unauthorized-udp` -echo unexpected.value `rec_control get unexpected-packets` +echo tcp.value "$($rec_control get unauthorized-tcp)" +echo udp.value "$($rec_control get unauthorized-udp)" +echo unexpected.value "$($rec_control get unexpected-packets)" exit 0 diff --git a/plugins/pdns/pdns_rel 
b/plugins/pdns/pdns_rel index eb660507..b42439d8 100755 --- a/plugins/pdns/pdns_rel +++ b/plugins/pdns/pdns_rel @@ -9,12 +9,18 @@ #%# family=auto #%# capabilities=autoconf -command="/etc/init.d/pdns show" +pdns_control="/usr/bin/pdns_control" +command="$pdns_control show" state_file=$MUNIN_PLUGSTATE/pdns_rel.state if [ "$1" = "autoconf" ]; then - echo yes - exit 0 + if [ -e "$pdns_control" ]; then + echo yes + exit 0 + else + echo "no (missing $pdns_control)" + exit 0 + fi fi if [ "$1" = "config" ]; then @@ -31,18 +37,20 @@ if [ "$1" = "config" ]; then exit 0 fi -hits=$($command packetcache-hit | awk -F= '{print $2}') -queries=$($command udp-queries | awk -F= '{print $2}') -old_hits=$(cat $state_file | head -n1) -old_queries=$(cat $state_file | tail -n1) - -if [ -f $state_file ] && [ $(ls -l --time-style=+%s $state_file | awk '{print $6}') -gt $(date --date="7 minutes ago" +%s) ] ; then - d_hits=$(($hits - $old_hits)) - d_queries=$(($queries - $old_queries)) - if [ $d_queries -gt 0 ] ; then - echo packetcache_hitrate.value $(( $d_hits * 100 / $d_queries )) - fi +hits=$($command packetcache-hit) +queries=$($command udp-queries) +if [ -f "$state_file" ]; then + old_hits=$(head -n1 "$state_file") + old_queries=$(tail -n1 "$state_file") fi -echo $hits > $state_file -echo $queries >> $state_file +if [ -f "$state_file" ] && [ "$(stat --format=%Y "$state_file")" -gt "$(date --date="7 minutes ago" +%s)" ] ; then + d_hits=$((hits - old_hits)) + d_queries=$((queries - old_queries)) + if [ $d_queries -gt 0 ] ; then + echo packetcache_hitrate.value $(( d_hits * 100 / d_queries )) + fi +fi + +echo "$hits" > "$state_file" +echo "$queries" >> "$state_file" diff --git a/plugins/percona/percona_ b/plugins/percona/percona_ old mode 100644 new mode 100755 index 3340b468..4992b550 --- a/plugins/percona/percona_ +++ b/plugins/percona/percona_ @@ -24,7 +24,8 @@ # This plugin requires pythons MySQLdb module which can be installed via easy_install. 
# # ## Installation -# Copy file to directory /usr/share/munin/plugins/ and create symbolic links for each type you wish to monitor: +# Copy file to directory /usr/share/munin/plugins/ and create symbolic links for each type you wish +# to monitor: # percona_flow # percona_queues # percona_replication @@ -37,52 +38,57 @@ # env.user root # env.password vErYsEcReT # -#%# capabilities=autoconf -#%# family=contrib +# #%# capabilities=autoconf +# #%# family=contrib +import os +import sys from warnings import filterwarnings -import os, sys, MySQLdb, MySQLdb.cursors -filterwarnings('ignore', category = MySQLdb.Warning) -progName = os.path.basename(__file__) +import MySQLdb +import MySQLdb.cursors + +filterwarnings('ignore', category=MySQLdb.Warning) + +program_name = os.path.basename(__file__) variables = { - 'percona_queues': { - 'label': 'Queue sizes', - 'vlabel': 'size', - 'fields': ['wsrep_local_recv_queue', 'wsrep_local_send_queue'] - }, - 'percona_flow': { - 'label': 'Flow control', - 'vlabel': '', - 'fields': ['wsrep_flow_control_sent', 'wsrep_flow_control_recv'] - }, - 'percona_transactions': { - 'label': 'Transactions in and out', - 'vlabel': 'transactions', - 'fields': ['wsrep_replicated', 'wsrep_received'] - }, - 'percona_transactions_bytes': { - 'label': 'Transactions in and out in bytes', - 'vlabel': 'bytes', - 'fields': ['wsrep_replicated_bytes', 'wsrep_received_bytes'] - }, - 'percona_replication': { - 'label': 'Replication conflicts', - 'vlabel': 'conflicts', - 'fields': ['wsrep_local_cert_failures', 'wsrep_local_bf_aborts'], - } + 'percona_queues': { + 'label': 'Queue sizes', + 'vlabel': 'size', + 'fields': ['wsrep_local_recv_queue', 'wsrep_local_send_queue'] + }, + 'percona_flow': { + 'label': 'Flow control', + 'vlabel': '', + 'fields': ['wsrep_flow_control_sent', 'wsrep_flow_control_recv'] + }, + 'percona_transactions': { + 'label': 'Transactions in and out', + 'vlabel': 'transactions', + 'fields': ['wsrep_replicated', 'wsrep_received'] + }, + 'percona_transactions_bytes': { + 'label': 'Transactions in and out in bytes', + 'vlabel': 'bytes', + 'fields': ['wsrep_replicated_bytes', 'wsrep_received_bytes'] + }, + 'percona_replication': { + 'label': 'Replication conflicts', + 'vlabel': 'conflicts', + 'fields': ['wsrep_local_cert_failures', 'wsrep_local_bf_aborts'], + } } # Parse environment variables # Mysql host -if "host" in os.environ and os.environ["host"] != None: +if "host" in os.environ and os.environ["host"] is not None: server = os.environ["host"] else: - server = "localhost" + server = "localhost" # Mysql port -if "port" in os.environ and os.environ["port"] != None: +if "port" in os.environ and os.environ["port"] is not None: try: port = int(os.environ["port"]) except ValueError: @@ -91,13 +97,13 @@ else: port = 3306 # Mysql username -if "user" in os.environ and os.environ["user"] != None: +if "user" in os.environ and os.environ["user"] is not None: login = os.environ["user"] else: login = "" # Mysql password -if "password" in os.environ and os.environ["password"] != None: +if "password" in os.environ and os.environ["password"] is not None: passw = os.environ["password"] else: passw = "" @@ -105,9 +111,9 @@ else: # Mysql connection handler conn = None -label = variables[progName]['label'] -vlabel = variables[progName]['vlabel'] -fields = ['\'{0}\''.format(x) for x in variables[progName]['fields']] +label = variables[program_name]['label'] +vlabel = variables[program_name]['vlabel'] +fields = ["'{0}'".format(x) for x in variables[program_name]['fields']] query = "show 
status where Variable_name in (%s)" % ', '.join(fields) @@ -115,31 +121,31 @@ query = "show status where Variable_name in (%s)" % ', '.join(fields) try: conn = MySQLdb.connect(host=server, user=login, passwd=passw) cursor = conn.cursor() -except MySQLdb.Error, e: - print "Error %d: %s" % (e.args[0], e.args[1]) +except MySQLdb.Error as e: + print("Error %d: %s" % (e.args[0], e.args[1])) sys.exit(1) values = {} if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - print "yes" + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": - print "graph_title %s" % label - print "graph_vlabel %s" % vlabel - print "graph_category db" - print "" + print("graph_title %s" % label) + print("graph_vlabel %s" % vlabel) + print("graph_category db") + print() try: cursor.execute(query) results = cursor.fetchall() for result in results: - print "%s_size.label %s" % (result[0], result[0]) + print("%s_size.label %s" % (result[0], result[0])) - except MySQLdb.Error, e: - print "Error %d: %s" % (e.args[0], e.args[1]) + except MySQLdb.Error as e: + print("Error %d: %s" % (e.args[0], e.args[1])) else: try: @@ -147,10 +153,10 @@ else: results = cursor.fetchall() for result in results: - print "%s_size.value %s" % (result[0], result[1]) + print("%s_size.value %s" % (result[0], result[1])) - except MySQLdb.Error, e: - print "Error %d: %s" % (e.args[0], e.args[1]) + except MySQLdb.Error as e: + print("Error %d: %s" % (e.args[0], e.args[1])) if conn: conn.close() diff --git a/plugins/pf/pf b/plugins/pf/pf index 5f1366ab..afc46618 100755 --- a/plugins/pf/pf +++ b/plugins/pf/pf @@ -81,31 +81,31 @@ case $1 in # enabled? if [ `${pfctl} -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8))" - exit 1 + exit 0 fi # FreeBSD elif [ ${ostype} = "FreeBSD" ]; then # enabled? if [ `${pfctl} -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8))" - exit 1 + exit 0 fi # OpenBSD elif [ ${ostype} = "OpenBSD" ]; then # pf(4) module loaded? if [ `kldstat -v | grep pf | wc -l` -eq 0 ]; then echo "no (pf(4) is not loaded)" - exit 1 + exit 0 fi # enabled? if [ `${pfctl} -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8))" - exit 1 + exit 0 fi # Other OSes else echo "no (this plugin is not supported on your OS)" - exit 1 + exit 0 fi echo "yes" exit 0 diff --git a/plugins/pf/pf_bytes b/plugins/pf/pf_bytes index c3875b3a..98e67ae9 100755 --- a/plugins/pf/pf_bytes +++ b/plugins/pf/pf_bytes @@ -2,7 +2,7 @@ # # OpenBSD's pf(4) monitoring for OpenBSD # 2007, Originally by Gergely Czuczy -# for FreeBSD systems. Ported and splitted by the +# for FreeBSD systems. Ported and split by the # immerda admin team admin(at)immerda.ch # this version is adapted for openbsd and is only tested on # openbsd systems. @@ -51,12 +51,12 @@ END { # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # Other OSes else echo "no (this plugin is not supported on your OS)" - exit 1 + exit 0 fi echo "yes" exit 0 diff --git a/plugins/pf/pf_openbsd b/plugins/pf/pf_openbsd index 03160e14..1dc1e163 100755 --- a/plugins/pf/pf_openbsd +++ b/plugins/pf/pf_openbsd @@ -2,7 +2,7 @@ # # OpenBSD's pf(4) monitoring for OpenBSD # 2007, Originally by Gergely Czuczy -# for FreeBSD systems. Ported and splitted by the +# for FreeBSD systems. 
Ported and split by the # immerda admin team admin(at)immerda.ch # this version is adapted for openbsd and is only tested on # openbsd systems. @@ -48,24 +48,24 @@ EOF # pf(4) module loaded? if [ `kldstat -v | grep pf | wc -l` -eq 0 ]; then echo "no (pf(4) is not loaded)" - exit 1 + exit 0 fi # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # OpenBSD elif [ ${ostype} = "OpenBSD" ]; then # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # Other OSes else echo "no (this plugin is not supported on your OS)" - exit 1 + exit 0 fi echo "yes" exit 0 diff --git a/plugins/pf/pf_packets b/plugins/pf/pf_packets index 7995aee6..fc379996 100755 --- a/plugins/pf/pf_packets +++ b/plugins/pf/pf_packets @@ -2,7 +2,7 @@ # # OpenBSD's pf(4) monitoring for OpenBSD # 2007, Originally by Gergely Czuczy -# for FreeBSD systems. Ported and splitted by the +# for FreeBSD systems. Ported and split by the # immerda admin team admin(at)immerda.ch # this version is adapted for openbsd and is only tested on # openbsd systems. @@ -50,24 +50,24 @@ END { # pf(4) module loaded? if [ `kldstat -v | grep pf | wc -l` -eq 0 ]; then echo "no (pf(4) is not loaded)" - exit 1 + exit 0 fi # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # OpenBSD elif [ ${ostype} = "OpenBSD" ]; then # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # Other OSes else echo "no (this plugin is not supported on your OS)" - exit 1 + exit 0 fi echo "yes" exit 0 diff --git a/plugins/pf/pf_states b/plugins/pf/pf_states index 5e57ea60..e15df623 100755 --- a/plugins/pf/pf_states +++ b/plugins/pf/pf_states @@ -2,7 +2,7 @@ # # OpenBSD's pf(4) monitoring for OpenBSD # 2007, Originally by Gergely Czuczy -# for FreeBSD systems. Ported and splitted by the +# for FreeBSD systems. Ported and split by the # immerda admin team admin(at)immerda.ch # this version is adapted for openbsd and is only tested on # openbsd systems. @@ -44,24 +44,24 @@ ${pfctl} -sm 2> /dev/null | awk ' # pf(4) module loaded? if [ `kldstat -v | grep pf | wc -l` -eq 0 ]; then echo "no (pf(4) is not loaded)" - exit 1 + exit 0 fi # enabled? if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # OpenBSD elif [ ${ostype} = "OpenBSD" ]; then # enabled? 
if [ `pfctl -si 2>/dev/null | awk '/^Status:/{print $2}'` != "Enabled" ]; then echo "no (pf(4) is not enabled, consult pfctl(8)" - exit 1 + exit 0 fi # Other OSes else echo "no (this plugin is not supported on your OS)" - exit 1 + exit 0 fi echo "yes" exit 0 diff --git a/plugins/pf/pf_tables_ b/plugins/pf/pf_tables_ old mode 100644 new mode 100755 diff --git a/plugins/php/eaccelerator b/plugins/php/eaccelerator index 9a9fcbc7..d7885118 100755 --- a/plugins/php/eaccelerator +++ b/plugins/php/eaccelerator @@ -18,8 +18,8 @@ # - php file placed on web server , paste there below code (strip hash files first) # # 0,"memoryAvailable"=>0,"memoryAllocated"=>0,"cachedScripts"=>0,"removedScripts"=>0,"cachedKeys"=>0); # if(!function_exists("eaccelerator_info")) # $info = $keys; @@ -28,14 +28,14 @@ # foreach($keys as $key => $val) echo strtolower($key).".value ".$info[$key]."\n"; # ?> # -# - name that file eaccelerator_status.php, will be easier, file should be at least accesible from the address that runs this script (usually localhost) -# you can make this file accessible globally, it just displays the memor usage by eaccelerator, thats all. +# - name that file eaccelerator_status.php, will be easier, file should be at least accessible from the address that runs this script (usually localhost) +# you can make this file accessible globally, it just displays the memory usage by eaccelerator, that's all. # usually you can put it to the /var/www/ (but it depends on the server configuration etc) # - check if you can see the output of the file, for example if you placed file in the DocumentRoot then it should be available from # http://localhost/eaccelerator_status.php # if you see the plain text with values then its working ok! -# if you see the plain text and all values are zero then probalby eaccelerator is not enabled. -# - installed wget +# if you see the plain text and all values are zero then probably eaccelerator is not enabled. +# - installed wget # ################################################################# # @@ -53,8 +53,8 @@ WGET_FLAGS="-Yoff"; # refer to wget manual, you may set extra parameters like di # Changelog # # Revision 0.1 Tue 03 Feb 2009 02:16:02 PM CET _KaszpiR_ -# - initial release, -# +# - initial release, +# ################################################################# @@ -62,7 +62,7 @@ WGET_FLAGS="-Yoff"; # refer to wget manual, you may set extra parameters like di ################################################################# ################################################################# -# Settigs required for autoconf +# Settings required for autoconf #%# family=manual #%# capabilities=autoconf @@ -79,7 +79,7 @@ if [ "$1" = "config" ]; then echo 'graph_args -l 0' echo 'graph_category webserver' echo 'graph_info This graph shows performance of the eaccelerator module on WWW server.' - + echo 'memorysize.label total' echo 'memorysize.draw AREA' echo 'memorysize.min 0' @@ -91,7 +91,7 @@ if [ "$1" = "config" ]; then # echo "memoryallocated.warning 92" # echo "memoryallocated.critical 98" echo 'memoryallocated.info Memory allocated .' - + echo 'memoryavailable.label available' echo 'memoryavailable.min 0' echo 'memoryavailable.info Memory available .' diff --git a/plugins/php/eaccelerator-python b/plugins/php/eaccelerator-python index dc2bad21..098f204f 100755 --- a/plugins/php/eaccelerator-python +++ b/plugins/php/eaccelerator-python @@ -2,7 +2,7 @@ ''' Plugin to monitor performance of eaccelerator module for PHP. -To use: +To use: 1. 
Copy script to munin plugins folder 2. Symbolically link to eacc_memory and eacc_cached * eacc_memory shows memory usage @@ -33,27 +33,27 @@ config = { 'graph_info This graph shows memory performance of PHP eaccelerator module\n' + 'graphs_args -1 0\n' + 'graph_category webserver\n' + - + 'memorysize.label total\n' + 'memorysize.draw AREA\n' + 'memorysize.min 0\n' + 'memorysize.info Total memory\n' + - + 'memoryallocated.label allocated\n' + 'memoryallocated.draw LINE1\n' + 'memoryallocated.min 0\n' + 'memoryallocated.info Memory allocated', - 'cached': + 'cached': 'graph_title eacceleratory cached scripts\n' + 'graph_info This graph shows how many scripts are cached by PHP eaccelerator module\n' + 'graphs_args -1 0\n' + 'graph_category webserver\n' + - + 'cachedscripts.label cached scripts\n' + 'cachedscripts.draw LINE1\n' + 'cachedscripts.min 0\n' + 'cachedscripts.info Cached scripts\n' + - + 'removedscripts.label removed scripts\n' + 'removedscripts.draw LINE1\n' + 'removedscripts.min 0\n' + @@ -68,16 +68,16 @@ def get_stats(): if 'auth_user' in os.environ and 'auth_pwd' in os.environ: fetcher.add_credentials(os.environ['auth_user'], os.environ['auth_pwd']) resp, content = fetcher.request(os.environ["cpanel"]) - + if resp['status'] != '200': content = '0 0 0 0' - + bits = content.split(' ') return {'memorysize': bits[0], 'memoryallocated': bits[1], 'cachedscripts': bits[2], 'removedscripts': bits[3]} def print_stats(command): stats = get_stats() - + for var in command_vars[command]: print "%s.value %s" % (var, stats[var]) @@ -87,19 +87,19 @@ if __name__ == "__main__": except ImportError: print "httplib2 not found" sys.exit(1) - + if os.environ['cpanel'] == '': print "env.cpanel not defined in munin config" sys.exit() underscore = sys.argv[0].find('_') - + if underscore == -1: print "Symbolically link this file to eacc_memory or eacc_cached" sys.exit(1) else: command = sys.argv[0][underscore+1:] - + if len(sys.argv) > 1 and sys.argv[1] != '': if sys.argv[1] == 'config': print_config(command) diff --git a/plugins/php/eaccelerator-usage b/plugins/php/eaccelerator-usage index f95a19d1..090c5256 100755 --- a/plugins/php/eaccelerator-usage +++ b/plugins/php/eaccelerator-usage @@ -89,7 +89,7 @@ if ($resp->is_success) { print "cached.value $v\n" if $b =~ /cached/i; print "removed.value $v\n" if $b =~ /removed/i; print "keys.value $v\n" if $b =~ /keys/i; - + } $b = $_; } diff --git a/plugins/php/php-cgi b/plugins/php/php-cgi index c9b8009e..48a31d05 100755 --- a/plugins/php/php-cgi +++ b/plugins/php/php-cgi @@ -10,7 +10,7 @@ # config (required) # autoconf (optional - used by munin-config) # -# Magick markers (optional - used by munin-config and som installation +# Magick markers (optional - used by munin-config and some installation # scripts): #%# family=manual #%# capabilities=autoconf diff --git a/plugins/php/php5-fpm_status b/plugins/php/php5-fpm_status index 73cc92c4..8af29db8 100755 --- a/plugins/php/php5-fpm_status +++ b/plugins/php/php5-fpm_status @@ -3,7 +3,7 @@ # Plugin to monitor php5-fpm process manager, php5-fpm 5.3.3 is required # 20100726 21:15:39 radar AT aol DOT pl # modified by Daniel Caillibaud on 20110926 -# +# # /etc/php5/fpm/php5-fpm.conf: # # pm.status_path = /fpm-status diff --git a/plugins/php/php_apc_ b/plugins/php/php_apc_ index f0824171..d9be9ce8 100755 --- a/plugins/php/php_apc_ +++ b/plugins/php/php_apc_ @@ -2,7 +2,7 @@ ################################################################# # # Script to monitor apc usage -# +# 
################################################################# # # Parameters understood: config, autoconf and suggest @@ -10,9 +10,9 @@ ################################################################# # # Configuration section -# +# # Configuration example -# +# # [php_apc_*] # user root # env.URL http://localhost/php_apc.php # URL to fetch APC status @@ -26,11 +26,11 @@ # ################################################################# ################################################################# -# Settigs required for autoconf +# Settings required for autoconf #%# family=auto #%# capabilities=autoconf suggest -# URL to the script to check APC status (defaults to +# URL to the script to check APC status (defaults to # 'http://localhost/php_apc.php' if not configured) URL=${URL:-'http://localhost/php_apc.php'} @@ -39,8 +39,12 @@ WGET_FLAGS="-Yoff"; # refer to wget manual, you may set extra parameters like di act=`basename $0 | sed 's/^php_apc_//g'` if [ "$1" = "autoconf" ]; then - [ -z "$URL" ] && echo "no (edit URL config in header file !)" && exit 1 - [ -n "$URL" ] && echo "yes" && exit 0 + if [ -z "$URL" ]; then + echo "no (missing URL config in header file)" + else + echo "yes" + fi + exit 0 fi if [ "$1" = "suggest" ]; then diff --git a/plugins/php/php_eaccelerator b/plugins/php/php_eaccelerator index d8a8a818..e5e93354 100755 --- a/plugins/php/php_eaccelerator +++ b/plugins/php/php_eaccelerator @@ -1,6 +1,6 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby -# Monitor your EAccelerator usage. +# Monitor your EAccelerator usage. # Requires: ruby # Mandatory Parameters @@ -31,15 +31,15 @@ user = ENV['user'] || 'user' pwd = ENV['password'] || 'password' url = ENV['url'] || 'http://127.0.0.1/control.php' -if ARGV[0]=="config" +if ARGV[0] == 'config' print "EAccelerator Monitoring\n" print "graph_title PHP Eaccelerator\n" print "graph_category webserver\n" - print "Memoryusagepercentage.label Memory Usage %\n" - print "Memoryusagepercentage.warning 95\n" - print "Memoryusagepercentage.critical 95\n" - print "Memoryusage.label Memory Usage MB\n" - print "Memorymax.label Cache Size MB\n" + print "Memoryusagepercentage.label Memory Usage %\n" + print "Memoryusagepercentage.warning 95\n" + print "Memoryusagepercentage.critical 95\n" + print "Memoryusage.label Memory Usage MB\n" + print "Memorymax.label Cache Size MB\n" print "Freememory.label Free Memory MB\n" print "Cachedscripts.label Cached Scripts\n" print "Removedscripts.label Removed Scripts\n" @@ -47,58 +47,50 @@ if ARGV[0]=="config" exit end -one_liners=0 -three_liners=0 -key="" +one_liners = 0 +three_liners = 0 +key = '' -open(url, :http_basic_authentication=>[user, pwd]) do |f| +open(url, http_basic_authentication: [user, pwd]) do |f| f.each do |line| - if three_liners>0 - three_liners=three_liners+1 + if three_liners > 0 + three_liners += 1 - if three_liners==2 - print "Memoryusagepercentage.value " - end + print 'Memoryusagepercentage.value ' if three_liners == 2 - if three_liners==3 - print "Memoryusage.value " - end + print 'Memoryusage.value ' if three_liners == 3 - if three_liners==4 - print "Memorymax.value " - end + print 'Memorymax.value ' if three_liners == 4 - print line.gsub!(/[^0-9.]/s,"") + print line.gsub!(/[^0-9.]/s, '') print "\n" end - if one_liners>0 - one_liners=one_liners+1 + if one_liners > 0 + one_liners += 1 print "#{key}.value " - print line.gsub!(/[^0-9.]/s,"") + print line.gsub!(/[^0-9.]/s, '') print "\n" end - if one_liners>1 - line="" - one_liners=0 + if one_liners > 1 + line = '' + one_liners = 0 end - 
if three_liners>3 - line="" - three_liners=0 + if three_liners > 3 + line = '' + three_liners = 0 end if line =~ /Memory usage/ - key=line.gsub!(/(<[^>]*>)|\n|\t| /s,"") - three_liners=three_liners+1 + key = line.gsub!(/(<[^>]*>)|\n|\t| /s, '') + three_liners += 1 end - if line =~ /Free memory/ || line =~ /Cached scripts/ || line =~ /Removed scripts/ || line =~ /Cached keys/ - key=line.gsub!(/(<[^>]*>)|\n|\t| /s,"") - one_liners=one_liners+1 + if line =~ /Free memory/ || line =~ /Cached scripts/ || line =~ /Removed scripts/ || line =~ /Cached keys/ + key = line.gsub!(/(<[^>]*>)|\n|\t| /s, '') + one_liners += 1 end - end end - diff --git a/plugins/php/php_errors_ b/plugins/php/php_errors_ old mode 100644 new mode 100755 index dd073a58..d02f8064 --- a/plugins/php/php_errors_ +++ b/plugins/php/php_errors_ @@ -30,20 +30,20 @@ Revision 0.1 2011/06/17 12:00:00 Ulrich Lusseau . $MUNIN_LIBDIR/plugins/plugin.sh LOGS=${logfile:-/var/log/apache2/error.log} - - + + if [[ $1 == autoconf ]]; then for LOG in $LOGS; do - if [[ ! -r $LOGS ]]; then - echo no - exit 1 + if [[ ! -r $LOG ]]; then + echo "no (cannot read '$LOG')" + exit 0 fi done echo yes exit 0 fi - + if [[ $1 == config ]]; then echo 'graph_title PHP Errors from ' $LOGS echo 'graph_args --base 1000 -l 0' diff --git a/plugins/php/php_fpm_process b/plugins/php/php_fpm_process index 873b814e..d4ff5191 100755 --- a/plugins/php/php_fpm_process +++ b/plugins/php/php_fpm_process @@ -25,7 +25,7 @@ You have to put this in your plugin.conf.d folder # If your php process is listening on Unix Socket [php_fpm_process] - env.sock /var/run/php5-fpm.sock + env.sock /var/run/php-fpm.sock env.path /status =head1 MAGIC MARKERS @@ -35,15 +35,16 @@ You have to put this in your plugin.conf.d folder =head1 VERSION - v1.0 + v2.0.0 =head1 AUTHOR Minitux +Olivier Mehani =head1 LICENSE -GNU General Public License, version 3 +SPDX-License-Identifier: GPL-3.0 =cut @@ -56,6 +57,10 @@ my $body = ""; my $IDLE = 0; my $ACTIVE = 0; my $TOTAL = 0; +my $LISTEN = 0; +my $MAX = 0; +my $LEN = 0; +my $CONNECTIONS = 0; my $SLOW_REQUESTS = 0; my $PLUGIN_NAME = basename($0); @@ -67,67 +72,104 @@ my $UNIX_SOCK = $ENV{'sock'}; my $sock; if ($UNIX_SOCK) { - use IO::Socket::UNIX; - $sock = IO::Socket::UNIX->new( - Peer => $UNIX_SOCK, - ); - if (!$sock) { - print "Server maybe down, unabled to connect to $UNIX_SOCK"; - exit 2; - } + use IO::Socket::UNIX; + $sock = IO::Socket::UNIX->new( + Peer => $UNIX_SOCK, + ); + if (!$sock) { + print "Server maybe down, unabled to connect to $UNIX_SOCK"; + exit 2; + } } else { - use IO::Socket::INET; - $sock = IO::Socket::INET->new( - PeerAddr => $SERVERADDR, - PeerPort => $PORT, - ); - if (!$sock) { - print "Server maybe down, unabled to connect to $SERVERADDR:$PORT"; - exit 2; - } + use IO::Socket::INET; + $sock = IO::Socket::INET->new( + PeerAddr => $SERVERADDR, + PeerPort => $PORT, + ); + if (!$sock) { + print "Server maybe down, unabled to connect to $SERVERADDR:$PORT"; + exit 2; + } } my $client = FCGI::Client::Connection->new( sock => $sock ); my ( $stdout, $stderr, $appstatus ) = $client->request( - +{ - REQUEST_METHOD => 'GET', - SCRIPT_FILENAME => '', - QUERY_STRING => '', - SCRIPT_NAME => $PATH, - }, - '' - ); + +{ + REQUEST_METHOD => 'GET', + SCRIPT_FILENAME => '', + QUERY_STRING => '', + SCRIPT_NAME => $PATH, + }, + '' +); + +# +# Example output: +# +# pool: www +# process manager: dynamic +# start time: 23/Jun/2019:12:13:50 +0200 +# start since: 577793 +# accepted conn: 37211 +# listen queue: 0 +# max listen queue: 0 +# listen queue len: 0 
+# idle processes: 6 +# active processes: 1 +# total processes: 7 +# max active processes: 13 +# max children reached: 0 +# slow requests: 0 + +# +# ...with ?full added to the query string, each child is also described: +# +# ************************ +# pid: 56027 +# state: Running +# start time: 18/Jul/2019:01:02:15 +0200 +# start since: 45279 +# requests: 776 +# request duration: 1043 +# request method: GET +# request URI: /fpm-status?full +# content length: 0 +# user: - +# script: - +# last request cpu: 0.00 +# last request memory: 0 +# $stdout =~ s/\r//g; while($stdout =~ /([^\n]*)\n?/g) { - if(!$1) { - $ish = 0; - next; - } - if($ish == 1) { - $header .= $1."\n"; - } else { - $body .= $1."\n"; - } + if(!$1) { + $ish = 0; + next; + } + if($ish == 1) { + $header .= $1."\n"; + } else { + $body .= $1."\n"; + } } if ( defined $ARGV[0] and $ARGV[0] eq "config" ) { - if($body =~ m/pool:\s+(.*?)\n/) { - $pool = $1; - } + if($body =~ m/pool:\s+(.*?)\n/) { + $pool = $1; + } - print <<"EOF"; + print <<"EOF"; multigraph ${PLUGIN_NAME}_process -graph_title php5-fpm processes for $pool +graph_title php-fpm processes for $pool graph_args --base 1000 -l 0 graph_vlabel Processes graph_scale yes -graph_category processes -graph_info This graph shows the php5-fpm process manager status from pool: $pool +graph_category appserver +graph_info This graph shows the php-fpm process manager status from pool $pool active.label Active processes active.type GAUGE active.draw AREA @@ -140,23 +182,52 @@ total.label Total processes total.type GAUGE total.draw LINE2 total.info The number of idle + active processes +max.label Max processes +max.type GAUGE +max.draw LINE +max.info The maximum number of active processes since FPM has started -multigraph ${PLUGIN_NAME}_slowrequests -graph_title php5-fpm slow requests $pool +multigraph ${PLUGIN_NAME}_queues +graph_title php-fpm queues for $pool graph_args --base 1000 -l 0 -graph_vlabel Slow requests +graph_vlabel Queue graph_scale yes -graph_category processes -graph_info This graph shows the php5-fpm slow request from pool: $pool -slow_requests.label Slow requests -slow_requests.type DERIVE -slow_requests.draw LINE2 -slow_requests.min 0 -slow_requests.info evolution of slow requests +graph_category appserver +graph_info This graph shows the php-fpm queue from pool $pool +listen.label Listen queue +listen.type GAUGE +listen.draw LINE +listen.info The number of pending requests in the queue +max.label Max listen queue +max.type GAUGE +max.draw LINE +max.info The maximum number of pending requests in the queue +len.label Queue len +len.type GAUGE +len.draw LINE +len.info The number of pending connections in the queue + +multigraph ${PLUGIN_NAME}_requests +graph_title php-fpm requests for $pool +graph_args --base 1000 -l 0 +graph_vlabel Requests +graph_scale yes +graph_category appserver +graph_info This graph shows the php-fpm request rate from pool $pool +connections.label Connections +connections.type DERIVE +connections.draw LINE +connections.min 0 +connections.info evolution of connections +slow.label Slow requests +slow.type DERIVE +slow.draw LINE +slow.min 0 +slow.info evolution of slow requests (longer than request_slowlog_timeout) EOF - exit 0 + exit 0 } # print $body; @@ -164,21 +235,42 @@ EOF print "multigraph ${PLUGIN_NAME}_process\n"; if($body =~ m/idle processes: (.*?)\n/) { - $IDLE = $1; - print "idle.value ".$IDLE."\n"; + $IDLE = $1; + print "idle.value ".$IDLE."\n"; } if($body =~ m/active processes: (.*?)\n/) { - $ACTIVE = $1; - print "active.value 
".$ACTIVE."\n"; + $ACTIVE = $1; + print "active.value ".$ACTIVE."\n"; } if($body =~ m/total processes: (.*?)\n/) { - $TOTAL = $1; - print "total.value ".$TOTAL."\n"; + $TOTAL = $1; + print "total.value ".$TOTAL."\n"; +} +if($body =~ m/max active processes: (.*?)\n/) { + $MAX = $1; + print "max.value ".$MAX."\n"; } -if($body =~ m/slow requests: (.*?)\n/) { - $SLOW_REQUESTS = $1; - print "\n"; - print "multigraph ${PLUGIN_NAME}_slowrequests\n"; - print "slow_requests.value ".$SLOW_REQUESTS."\n"; +if($body =~ m/listen queue: (.*?)\n/) { + $LISTEN= $1; + print "multigraph ${PLUGIN_NAME}_queues\n"; + print "listen.value ".$LISTEN."\n"; + if($body =~ m/max listen queue: (.*?)\n/) { + $MAX = $1; + print "max.value ".$MAX."\n"; + } + if($body =~ m/listen queue len: (.*?)\n/) { + $LEN = $1; + print "len.value ".$LEN."\n"; + } +} + +print "multigraph ${PLUGIN_NAME}_requests\n"; +if($body =~ m/accepted conn: (.*?)\n/) { + $CONNECTIONS = $1; + print "connections.value ".$CONNECTIONS."\n"; +} +if($body =~ m/slow requests: (.*?)\n/) { + $SLOW_REQUESTS = $1; + print "slow.value ".$SLOW_REQUESTS."\n"; } diff --git a/plugins/php/php_opcache b/plugins/php/php_opcache index 4055d72f..da27a69c 100755 --- a/plugins/php/php_opcache +++ b/plugins/php/php_opcache @@ -10,7 +10,7 @@ # [php_opcache] # env.URL http://example/php_opcache.php ############################################################################### -# Settigs required for autoconf +# Settings required for autoconf #%# family=auto #%# capabilities=autoconf suggest @@ -21,8 +21,12 @@ WGET_FLAGS="-Yoff"; # refer to wget manual, you may set extra parameters like di act=memory if [ "$1" = "autoconf" ]; then - [ -z "$URL" ] && echo "no (edit URL config in header file !)" && exit 1 - [ -n "$URL" ] && echo "yes" && exit 0 + if [ -z "$URL" ]; then + echo "no (missing URL config in header file)" + else + echo "yes" + fi + exit 0 fi if [ "$1" = "suggest" ]; then diff --git a/plugins/php/php_opcache.php b/plugins/php/php_opcache.php old mode 100644 new mode 100755 index a4437a35..b45a0c14 --- a/plugins/php/php_opcache.php +++ b/plugins/php/php_opcache.php @@ -3,7 +3,7 @@ * Part of Munin PHP OPcache plugin - Refer to php_opcache for installation instructions. */ -if (function_exists('opcache_get_status')) +if (function_exists('opcache_get_status')) { $data = opcache_get_status(false); $output = array( diff --git a/plugins/php/php_sessions b/plugins/php/php_sessions index eb947f42..2950b9d3 100755 --- a/plugins/php/php_sessions +++ b/plugins/php/php_sessions @@ -43,11 +43,10 @@ if [ "$1" = "autoconf" ]; then test -d "$SESSDIR" > /dev/null 2>&1 if [ $? ]; then echo yes - exit 0 else echo "no (session directory not found)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/php/php_time_execution b/plugins/php/php_time_execution old mode 100644 new mode 100755 index bae1252f..5564e510 --- a/plugins/php/php_time_execution +++ b/plugins/php/php_time_execution @@ -7,7 +7,7 @@ # Min, Max and Avg are calculated on number of page, default 10. On high traffic site, increase this value and you get a better # stat, on low traffic site keep small value, it's must be avg number of page every 5 minutes. 
# -# Require read permitions for $LOG +# Require read permissions for $LOG # (set in /etc/munin/plugin-conf.d/munin-node on debian) # On busy servers you can change value type to COUNTER and set min to 0 to avoid minus peaks at logrotate # @@ -31,28 +31,27 @@ #env.sitename mon-code #env.nbrpage 10 # - + LOG=${logfile:-/var/log/apache2/access.log} NAME=${sitename:undefined} NBRPAGE=${nbrpage} - - + + if [ "$1" = "autoconf" ]; then if [ -r "$LOG" ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi - + if [ "$1" = "config" ]; then - + echo 'graph_title Time to generate PHP page ' $NAME 'v2' echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel Time in microsecond' - + echo "graph_category webserver" echo "graph_info This graph shows load time in ms of $target" echo "minloadtime.label Min time" @@ -61,9 +60,9 @@ if [ "$1" = "config" ]; then echo "avgloadtime.info Avg time" echo "maxloadtime.label Max time" echo "maxloadtime.info Max time" - + exit 0 fi - + awk '($4 ~ /[0-9]+\/[0-9]+/ && $8 !~ /\.(jpg|JPG|jpeg|JPEG|gif|GIF|png|PNG|txt|TXT|css|CSS|js|JS|zip|ZIP|bmp|BMP)$/)' $LOG | sed -e :a -e '$q;N;'$NBRPAGE',$D;ba' | awk '{print $4}' | awk -F\/ ' MIN=="" || $2 < MIN {MIN=$2} MAX=="" || $2 > MAX {MAX=$2} {SUM += $2} END {print "minloadtime.value ",MIN/1000,"\navgloadtime.value ",SUM/(NR*1000),"\nmaxloadtime.value ",MAX/1000}' - + diff --git a/plugins/php/php_xcache b/plugins/php/php_xcache index 148ad864..bfa3cc38 100755 --- a/plugins/php/php_xcache +++ b/plugins/php/php_xcache @@ -7,7 +7,7 @@ require LWP::UserAgent; # # Installation / Configuration # -# - place munin_xcache.php in a directory on your webserver +# - place munin_xcache.php in a directory on your webserver # - add the url config to plugin-conf.d/munin-node # # diff --git a/plugins/ping/ping-day.png b/plugins/ping/example-graphs/ping-day.png similarity index 100% rename from plugins/ping/ping-day.png rename to plugins/ping/example-graphs/ping-day.png diff --git a/plugins/ping/fping_ b/plugins/ping/fping_ index 2c7efd4a..f7c66d80 100755 --- a/plugins/ping/fping_ +++ b/plugins/ping/fping_ @@ -4,7 +4,7 @@ # Author : Thomas VIAL # Author URL : http://tvi.al # Usage : ln -s /path/to/fping_ /etc/munin/plugins/fping_www.google.com -# Explanation : Will graph connection to www.google.com +# Explanation : Will graph connection to www.google.com # Requirements : # * fping # @@ -47,5 +47,5 @@ if [ $status -eq 0 ]; then else # Failure echo "success.value 0" - echo "failure.value 100" + echo "failure.value 100" fi diff --git a/plugins/ping/multi_tcp_ping b/plugins/ping/multi_tcp_ping index 1ab779a4..a1b1dbeb 100755 --- a/plugins/ping/multi_tcp_ping +++ b/plugins/ping/multi_tcp_ping @@ -91,7 +91,7 @@ $cmd_arg = $ARGV[0] || ''; config() if($cmd_arg eq "config"); autoconf() if ($cmd_arg eq 'autoconf'); -for my $host (@hosts) { +for my $host (@hosts) { threads->new(\&ping_host, $host) } @@ -105,9 +105,12 @@ sub ping_host { $p=Net::Ping->new("tcp", $defaults{timeout}); $p->hires(); + $p->service_check(1); $p->{port_num} = $host->[1] || $defaults{port}; - ($ret, $time, $ip) = $p->ping($host->[0]); + eval { + ($ret, $time, $ip) = $p->ping($host->[0]); + }; $time = $defaults{unreachable} if !$ret; print "${addr}.value $time\n"; diff --git a/plugins/ping/multiping b/plugins/ping/multiping index 1fb56458..38d3b1fc 100755 --- a/plugins/ping/multiping +++ b/plugins/ping/multiping @@ -68,11 +68,10 @@ if ((exists $ARGV[0]) && ($ARGV[0] eq "autoconf")) { my $ping = join(" ", @ping); if ($ping =~ m@min/avg/max@) { print "yes\n"; - exit 0; } else { 
print "no\n"; - exit 1; } + exit 0; } if ((exists $ARGV[0]) && ($ARGV[0] eq "config")) { @@ -82,7 +81,7 @@ if ((exists $ARGV[0]) && ($ARGV[0] eq "config")) { print "graph_category network\n"; print "graph_info This graph shows ping RTT statistics.\n"; for (my $site=1; $site<=$#hosts+1; $site++) { - my $item = lc($hosts[$site-1]); + my $item = lc($hosts[$site-1]); $item =~ s/\.//g; print "$item.label $names[$site-1]\n"; print "$item.info Ping RTT statistics for $hosts[$site-1].\n"; @@ -94,7 +93,7 @@ if ((exists $ARGV[0]) && ($ARGV[0] eq "config")) { } for (my $site=1; $site<=$#hosts+1; $site++) { - my $item = lc($hosts[$site-1]); + my $item = lc($hosts[$site-1]); $item =~ s/\.//g; my $host = $hosts[$site-1]; my @ping = `$ping_cmd $ping_args $host $ping_args2`; diff --git a/plugins/ping/ping-with-ceil b/plugins/ping/ping-with-ceil index 5735e10a..d9f10c05 100755 --- a/plugins/ping/ping-with-ceil +++ b/plugins/ping/ping-with-ceil @@ -16,7 +16,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Python (2.5+) version of the plugin to monitor ping times. +# Python (2.5+) version of the plugin to monitor ping times. # Evolution from the standard shipped with munin by adding a ceil. # # Thanks to "Jimmy Olsen" for the base. @@ -82,5 +82,5 @@ for line in out.split("\n"): print "ping.value", "%.6f" % (v / 1000) EOF -) | python +) | python diff --git a/plugins/ping/pinger b/plugins/ping/pinger index 16db1279..8a4fab3c 100755 --- a/plugins/ping/pinger +++ b/plugins/ping/pinger @@ -67,11 +67,10 @@ case $1 in which ping if [[ "$?" = "0" ]]; then echo yes - exit 0 else echo "no (ping not present)" - exit 1 fi + exit 0 ;; config) cat << EOM diff --git a/plugins/poseidon/snmp__poseidon-sensors b/plugins/poseidon/snmp__poseidon-sensors index 0fe28a8d..82758480 100755 --- a/plugins/poseidon/snmp__poseidon-sensors +++ b/plugins/poseidon/snmp__poseidon-sensors @@ -26,10 +26,10 @@ #%# family=auto snmpauto contrib #%# capabilities=snmpconf -E_OK="0" # everything went allright +E_OK="0" # everything went alright E_UNKNOWN="1" # "catch all" for otherwise unhandled errors -E_ARG="81" # invalid argument +E_ARG="81" # invalid argument E_USAGE="82" # wrong program name or arguments E_SNMPGET="83" # error while executing the 'snmpget' utility @@ -57,12 +57,12 @@ SENS_VALUE_OID=".1.3.6.1.4.1.21796.3.3.3.1.6." # integer (decimal * 10) SENS_ID_OID=".1.3.6.1.4.1.21796.3.3.99.1.2.1.4." # unique sensor ID (integer) #+ representation of the #+ temperature (integer) -SENS_UNIT_OID=".1.3.6.1.4.1.21796.3.3.3.1.9." # 0=°C,1=°F,2=°K,3=%,4=V,5=mA, +SENS_UNIT_OID=".1.3.6.1.4.1.21796.3.3.3.1.9." # 0=°C,1=°F,2=°K,3=%,4=V,5=mA, RTS_OUTPUT_OID=".1.3.6.1.4.1.21796.3.3.2.1.2." # binary input state (integer) #+ 6=unknown, 7=pulse, 8=switch # define some Poseidon specific stuff: -STATE_OK="1" +STATE_OK="1" STATE_WARN="2" STATE_CRIT="3" UNITS=("C" "F" "K" "%" "V" "mA" "unknown" "pulse" "switch") @@ -100,13 +100,13 @@ EOT cat <<- EOT multigraph $sensorType graph_title $sensorLocation - $sensorType [$sensorUnit] -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel $sensorUnit graph_category sensors graph_info This graph shows $sensorType history. EOT fi - _firstSensor="0" + _firstSensor="0" cat <<- EOT $sensorType$_sensorNr.label $sensorName $sensorType$_sensorNr.info This graph shows $sensorType$_sensorNr history. 
@@ -158,7 +158,7 @@ EOT # getSensorData(sensorNr) # fetch state, value, name unit and sensor type for the sensor -#+ with the number "sensorNr" +#+ with the number "sensorNr" getSensorData() { _sensorNr=$1 sensorState=`snmpGet $hostAddr $SENS_STATE_OID$_sensorNr || err \ @@ -172,7 +172,7 @@ getSensorData() { } # getSystemInfo() -# fetch general information about the system +# fetch general information about the system getSystemInfo() { sensorLocation="`snmpGet $hostAddr $SYS_LOC_OID`" } @@ -185,22 +185,22 @@ snmpGet () { _host="$1" _exit="0" - # fetch the requested OID + # fetch the requested OID _longValue="`snmpget -O v\ -m $MIBS\ -v $SNMPVERSION\ -c $SNMPCOMMUNITY\ $_host $_oid 2>/dev/null`" - + _exitStatus="$?" echo ${_longValue#*:} # remove the type from the answer return $_exitStatus -} +} # get unit (string) -#+ find out the unit of the output of a given sensor. possible units are: -#+ "C" "F" "K" "%" "V" "mA" "unknown" "pulse" "switch" +#+ find out the unit of the output of a given sensor. possible units are: +#+ "C" "F" "K" "%" "V" "mA" "unknown" "pulse" "switch" getSensorUnitString () { _sensorNr=$1 _sensorUnit=`snmpGet $hostAddr $SENS_UNIT_OID$_sensorNr || err \ @@ -209,7 +209,7 @@ getSensorUnitString () { } # get type (string) -#+ find out what type of sensor we are dealing with. possible types are: +#+ find out what type of sensor we are dealing with. possible types are: #+ "Temp" "Hum" "Volt" "Curr" "Unkn" "Pulse" "Switch" getSensorType () { _sensorNr=$1 @@ -223,19 +223,19 @@ getSensorType () { #+ in the array for the respective unit getAvailableSensorsByType () { _thisSensorNr="1" - + # initial fetch _snmpget=`snmpGet $hostAddr $SENS_UNIT_OID$_thisSensorNr` _nextSensorExits=$? _unit=`echo "$_snmpget" | tr -d " "` - # add next sensor if it exists - while [ true ]; do + # add next sensor if it exists + while [ true ]; do # add sensors of the same type to a list - sensorsOfType[$_unit]="${sensorsOfType[$_unit]} $_thisSensorNr" - + sensorsOfType[$_unit]="${sensorsOfType[$_unit]} $_thisSensorNr" + # fetch next sensor - _thisSensorNr=$(($_thisSensorNr+1)) + _thisSensorNr=$(($_thisSensorNr+1)) _snmpget=`snmpGet $hostAddr $SENS_UNIT_OID$_thisSensorNr` _nextSensorExits=$? @@ -243,7 +243,7 @@ getAvailableSensorsByType () { if [ $_nextSensorExits -ne 0 ]; then break fi - + _unit=`echo "$_snmpget" |cut -d" " -f 4` done } @@ -264,7 +264,7 @@ sanitize () { } # usage () -# print usage +# print usage usage () { echo "usage: snmp__poseidon-sensors [config|snmpconf]" 1>&2 exit $E_USAGE @@ -279,7 +279,7 @@ err () { else _errorMsg="Fatal: An unknown error occurred! Exiting..." _exitCode="$E_UNKNOWN" - + fi # print error message to STDERR ... @@ -291,13 +291,13 @@ err () { #==============================================================================# # SNMP Config -MIBS=":" # we don't use any configured MIBs so we don't +MIBS=":" # we don't use any configured MIBs so we don't #+ have to deal with errors in the MIBs -if [ -z $SNMPVERSION ]; then +if [ -z $SNMPVERSION ]; then SNMPVERSION="1" # as of firmware 3.1.5 only SNMPv1 is supported fi -if [ -z $SNMPCOMMUNITY ]; then +if [ -z $SNMPCOMMUNITY ]; then SNMPCOMMUNITY="public" # SNMP community string to read from the device fi @@ -326,11 +326,11 @@ hostAddr="`basename "$0" | cut -d "_" -f 2`" if [ "$myName" = "snmp_poseidon-sensors" ]; then hostAddr=`sanitize $hostAddr || err \ "Fatal: Invalid argument \"$hostAddr\"! Exiting..." 
$E_ARG` - if [ -z "$hostAddr" ]; then + if [ -z "$hostAddr" ]; then usage munin fi getAvailableSensorsByType - if [ "$1" = "config" ]; then + if [ "$1" = "config" ]; then printMuninConfig exit $E_OK elif [ "$1" = "snmpconfig" ]; then diff --git a/plugins/postfix/greyfix b/plugins/postfix/greyfix index 87d0d4fd..75fd84c3 100755 --- a/plugins/postfix/greyfix +++ b/plugins/postfix/greyfix @@ -21,7 +21,7 @@ $ ln -s /path/to/plugin/greyfix /etc/munin/plugins/greyfix Configuration ============= -There are some settings that can be tweaked by adding statements to the +There are some settings that can be tweaked by adding statements to the munin-node config: [greyfix] @@ -36,10 +36,10 @@ env.num_steps 47 # graph the greylisted triplets separate from the whitelisted ones (default: yes) env.greylist_step no -Please note that the last step has no end date, so it includes all triplets -older than the second last step. I.e., the defaults (as named above) create a -graph that shows 10 steps of one week each, and one last step for everything -older than 10 weeks. Also, the separate greylist step is not considered +Please note that the last step has no end date, so it includes all triplets +older than the second last step. I.e., the defaults (as named above) create a +graph that shows 10 steps of one week each, and one last step for everything +older than 10 weeks. Also, the separate greylist step is not considered when applying num_steps. """ @@ -72,7 +72,7 @@ def greyfix_parse_triplets(): if greyfix.returncode > 0: print '# greyfix exited with exit code %i' % (greyfix.returncode) sys.exit(greyfix.returncode) - + triplets = [] for line in stdout.split("\n"): triplet = line.split("\t") diff --git a/plugins/postfix/policyd-spf-python b/plugins/postfix/policyd-spf-python index 93c53b5e..233a3c04 100755 --- a/plugins/postfix/policyd-spf-python +++ b/plugins/postfix/policyd-spf-python @@ -1,14 +1,14 @@ #! /bin/bash # # Munin plugin to monitor postfix-policyd-spf-python results -# Contributed by Alexander Koch +# Contributed by Alexander Koch # # This plugin is published under the terms of the MIT License. -# +# # Parameters understood: # config (required) # autoconf (optional - used by munin-config) -# +# # Config variables: # logfile - Where to find the postfix log (mail.log) # @@ -69,7 +69,7 @@ fi # function get_log_count() { - egrep "policyd-spf\[[0-9]+\]: $1;" "$LOGFILE" | grep "$(date '+%b %e')" | wc -l + egrep "policyd-spf\[[0-9]+\]:(.*) $1" "$LOGFILE" | grep "$(date '+%b %e')" | wc -l } PASS=$(get_log_count "Pass") diff --git a/plugins/postfix/postfix-policyd b/plugins/postfix/postfix-policyd index fb094740..aa6c793c 100755 --- a/plugins/postfix/postfix-policyd +++ b/plugins/postfix/postfix-policyd @@ -15,11 +15,10 @@ MYSQL_DB="postfixpolicyd" if [ "$1" = "autoconf" ]; then if [ -n "${MYSQL_PASS}" ] ; then echo yes - exit 0 else echo "no (set mysql pass)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/postfix/postfix-queue-size b/plugins/postfix/postfix-queue-size index f471b260..df5092fe 100755 --- a/plugins/postfix/postfix-queue-size +++ b/plugins/postfix/postfix-queue-size @@ -81,7 +81,7 @@ munin-node. =cut -# atempt to get spooldir via postconf, but environment overrides. +# attempt to get spooldir via postconf, but environment overrides. # Remember that postconf is not available unless postfix is. 
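# The lookup below can be reproduced by hand; "-h" makes postconf print only
# the value, without the "parameter =" prefix, e.g.:
#
#   postconf -h queue_directory    # typically /var/spool/postfix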
POSTCONFSPOOL="$(postconf -h queue_directory 2>/dev/null || echo /var/spool/postfix)" @@ -91,7 +91,7 @@ SPOOLDIR=${spooldir:-$POSTCONFSPOOL} case $1 in autoconf|detect) - + if [ -d $SPOOLDIR ] ; then echo yes exit 0 diff --git a/plugins/postfix/postfix-rbl-blocked-mails b/plugins/postfix/postfix-rbl-blocked-mails index 73fa2794..3517fed3 100755 --- a/plugins/postfix/postfix-rbl-blocked-mails +++ b/plugins/postfix/postfix-rbl-blocked-mails @@ -1,45 +1,48 @@ -#!/bin/bash +#!/bin/sh # -# Made by Stefan Bhler, Switzerland +# Made by Stefan Bühler, Switzerland # Monitor blocked Mails during Postfix RBL Scan, included Spamhaus, Spamcop, Manitu, MSRBL, NJABL -LOGFILE=${logfile:-/var/log/mail.log} # Allow user to specify logfile through env.logfile -DATE=`date '+%b %e %H'` -MAXLABEL=20 +# Allow user to specify logfile through env.logfile +LOGFILE=${logfile:-/var/log/mail.log} +DATE=$(date '+%b %e %H') +RBLS=${rbls:-spamhaus.org spamcop.net manitu.net msrbl.net njabl.org} + + +get_blocked_by_domain_count() { + local escaped_domain + # escape dots - for a proper regular expression + escaped_domain=$(echo "$1" | sed 's/\./\\./g') + grep -c "$DATE.*blocked using [^ ]*${escaped_domain}" "$LOGFILE" +} + if [ "$1" = "autoconf" ]; then - if [[ -r $LOGFILE ]]; then - echo yes - else - echo no - fi - exit 0 + if [ -r "$LOGFILE" ]; then + echo yes + else + echo "no (log file not found: $LOGFILE)" + fi + exit 0 fi if [ "$1" = "config" ]; then + echo 'graph_title RBL Counter' + echo 'graph_category mail' + echo 'graph_args --base 1000 -l 0' + echo 'graph_vlabel block during RBL' - echo 'graph_title RBL Counter' - echo 'graph_category mail' - echo 'graph_args --base 1000 -l 0' - echo 'graph_vlabel block during RBL' - echo 'spamhaus.label Blocked by Spamhaus.org' - echo 'spamcop.label Blocked by Spamcop' - echo 'manitu.label Blocked by manitu.net' - echo 'msrbl.label Blocked by msrbl.net' - echo 'njabl.label Blocked by njabl.org' - exit 0 + for RBL in $RBLS + do + echo "${RBL%%.*}.label Blocked by $RBL" + done + + exit 0 fi -echo -en "spamhaus.value " -echo $(grep "blocked using sbl-xbl.spamhaus.org" $LOGFILE | grep "$DATE" | wc -l) -echo -en "spamcop.value " -echo $(grep "blocked using bl.spamcop.net" $LOGFILE | grep "$DATE" | wc -l) -echo -en "manitu.value " -echo $(grep "blocked using ix.dnsbl.manitu.net" $LOGFILE | grep "$DATE" | wc -l) -echo -en "msrbl.value " -echo $(grep "blocked using combined.rbl.msrbl.net" $LOGFILE | grep "$DATE" | wc -l) -echo -en "njabl.value " -echo $(grep "blocked using combined.njabl.org" $LOGFILE | grep "$DATE" | wc -l) - +for RBL in $RBLS +do + printf '%s.value %s\n' "${RBL%%.*}" "$(get_blocked_by_domain_count $RBL)" +done \ No newline at end of file diff --git a/plugins/postfix/postfix_filtered b/plugins/postfix/postfix_filtered index 8cb70370..22eb0200 100755 --- a/plugins/postfix/postfix_filtered +++ b/plugins/postfix/postfix_filtered @@ -17,9 +17,9 @@ # env.policy my policy string # # When env.policy is set, this plugin will match the string you supply as env.policy and return the number of instances -# of that string as an output called "policy.value". +# of that string as an output called "policy.value". 
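# For example, with a greylisting policy daemon the munin-node configuration
# could contain something like the following (the string is only an
# illustration; use whatever text your policy service logs on rejection):
#
# [postfix*]
# env.policy Greylisted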
# -# If you are NOt using a postfix policy daemon, as above, use the line +# If you are NOt using a postfix policy daemon, as above, use the line # # [postfix*] # env.policy none @@ -29,7 +29,7 @@ -POLICY=${policy} +POLICY=${policy} if [ "$POLICY" = "none" ] then POLICY="" @@ -44,11 +44,10 @@ STATEFILE=$MUNIN_PLUGSTATE/postfix_mailfiltered.offset if [ "$1" = "autoconf" ]; then if [ -f "${MAIL_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -67,11 +66,11 @@ if [ "$1" = "config" ]; then echo 'allowed.label Allowed' echo 'allowed.min 0' echo 'allowed.type ABSOLUTE' - + echo 'rbl.label RBL blocked' echo 'rbl.min 0' echo 'rbl.type ABSOLUTE' - + if [ -z "$POLICY" ] then echo "empty" > /dev/null @@ -84,15 +83,15 @@ if [ "$1" = "config" ]; then echo 'helo.label HELO rejected' echo 'helo.min 0' echo 'helo.type ABSOLUTE' - + echo 'client.label Client rejected' echo 'client.min 0' echo 'client.type ABSOLUTE' - + echo 'sender.label Sender rejected' echo 'sender.min 0' echo 'sender.type ABSOLUTE' - + echo 'recipient.label Recipient unknown' echo 'recipient.min 0' echo 'recipient.type ABSOLUTE' diff --git a/plugins/postfix/postfix_filtered_awk b/plugins/postfix/postfix_filtered_awk index fde5dbc0..08b17d1c 100755 --- a/plugins/postfix/postfix_filtered_awk +++ b/plugins/postfix/postfix_filtered_awk @@ -17,9 +17,9 @@ # env.policy my policy string # # When env.policy is set, this plugin will match the string you supply as env.policy and return the number of instances -# of that string as an output called "policy.value". +# of that string as an output called "policy.value". # -# If you are NOT using a postfix policy daemon, as above, use the line +# If you are NOT using a postfix policy daemon, as above, use the line # # [postfix_filtered] # env.policy none @@ -40,11 +40,10 @@ STATEFILE=$MUNIN_PLUGSTATE/postfix_mailfiltered_test.offset if [ "$1" = "autoconf" ]; then if [ -f "${MAIL_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -58,7 +57,7 @@ if [ "$1" = "config" ]; then if [ -z "$POLICY" ] then echo 'graph_order rbl helo client sender recipient relay allowed' - + else echo 'graph_order rbl policy helo client sender recipient relay allowed' echo 'policy.label policy blocked' @@ -95,21 +94,16 @@ if [ "$1" = "config" ]; then fi - - $LOGTAIL ${MAIL_LOG} $STATEFILE | \ -awk 'BEGIN { na= 0; nb= 0; nc= 0; nd= 0; ne= 0; nf= 0; ng= 0; nh= 0 ; st= ENVIRON["POLICY"] } - - { - if (index($0, "queued as")) { na++ } - else if (index($0, "Relay access denied")) { nb++ } - else if (index($0, "blocked using")) { nc++ } - else if (index($0, "Helo command rejected")) { nd++ } - else if (index($0, "Client host rejected")) { ne++ } - else if (index($0, "Sender address rejected")) { nf++ } - else if (index($0, "Recipient address rejected")) { ng++ } - else if (st && index($0, st)) { nh++ } - } - END { print "allowed.value " na"\nrelay.value " nb"\nrbl.value " nc"\nhelo.value " nd"\nclient.value " ne"\nsender.value " nf"\nrecipient.value " ng ; if (st) print "policy.value " nh }' - - +awk 'BEGIN { allowed_count=0; relay_denied_count=0; rbl_count=0; helo_rejected_count=0; client_rejected_count=0; sender_rejected_count=0; reciepient_rejected_count=0; policy_count=0 ; st=ENVIRON["POLICY"] } + { + if (index($0, "queued as")) { allowed_count++ } + else if (index($0, "Relay access denied")) { relay_denied_count++ } + else if (index($0, "blocked using")) 
{ rbl_count++ } + else if (index($0, "Helo command rejected")) { helo_rejected_count++ } + else if (index($0, "Client host rejected")) { client_rejected_count++ } + else if (index($0, "Sender address rejected")) { sender_rejected_count++ } + else if (index($0, "Recipient address rejected")) { reciepient_rejected_count++ } + else if (st && index($0, st)) { policy_count++ } + } + END { print "allowed.value " allowed_count"\nrelay.value " relay_denied_count"\nrbl.value " rbl_count"\nhelo.value " helo_rejected_count"\nclient.value " client_rejected_count"\nsender.value " sender_rejected_count"\nrecipient.value " reciepient_rejected_count ; if (st) print "policy.value " policy_count }' diff --git a/plugins/postfix/postfix_mailfiltered b/plugins/postfix/postfix_mailfiltered index f96687df..c0ef6d6e 100755 --- a/plugins/postfix/postfix_mailfiltered +++ b/plugins/postfix/postfix_mailfiltered @@ -10,8 +10,8 @@ mktempfile () { -mktemp -t -} + mktemp -t +} MAIL_LOG=${logfile:-/var/log/mail.log} LOGTAIL=${logtail:-`which logtail`} @@ -20,11 +20,10 @@ STATEFILE=$MUNIN_PLUGSTATE/postfix_mailfiltered.offset if [ "$1" = "autoconf" ]; then if [ -f "${MAIL_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/postfix/postfix_mailqueue_ b/plugins/postfix/postfix_mailqueue_ index c2d4cb7b..abd110f6 100755 --- a/plugins/postfix/postfix_mailqueue_ +++ b/plugins/postfix/postfix_mailqueue_ @@ -82,7 +82,7 @@ munin-node. =cut -# atempt to get spooldir via postconf, but environment overrides. +# attempt to get spooldir via postconf, but environment overrides. # Remember that postconf is not available unless postfix is. CONFIG=${0##*postfix_mailqueue_} diff --git a/plugins/postfix/postfix_mailqueuelog_ b/plugins/postfix/postfix_mailqueuelog_ index 4ccc8f6a..a4be671a 100755 --- a/plugins/postfix/postfix_mailqueuelog_ +++ b/plugins/postfix/postfix_mailqueuelog_ @@ -171,7 +171,7 @@ foreach my $i (@status_list) #if(-l $statefile) { # die("$statefile is a symbolic link, refusing to touch it."); -#} +#} #open (OUT, '>', $statefile) or die "Unable to open statefile: $!\n"; #print OUT "sum:$sum\n"; #foreach my $i (@status_list) @@ -180,8 +180,8 @@ foreach my $i (@status_list) #} #close OUT; -sub parseLogfile -{ +sub parseLogfile +{ my ($fname) = @_; # the search parts diff --git a/plugins/postfix/postfix_mailstats b/plugins/postfix/postfix_mailstats index c21e2df2..7d2c518e 100755 --- a/plugins/postfix/postfix_mailstats +++ b/plugins/postfix/postfix_mailstats @@ -62,7 +62,7 @@ sub autoconf } else { print "no (could not find logdir)\n"; } - exit 1; + exit 0; } sub config diff --git a/plugins/postfix/postfix_mailstats_ b/plugins/postfix/postfix_mailstats_ index b3a7f489..64e074ec 100755 --- a/plugins/postfix/postfix_mailstats_ +++ b/plugins/postfix/postfix_mailstats_ @@ -10,7 +10,7 @@ rejected by postfix Uses the last part of the symlink name for grepping the correct data from the postfix log file. The name must be syslog_name from the postfix config. -The environment settings still applay to this plugin. +The environment settings still apply to this plugin. 
Configuration parameters for /etc/munin/postfix_mailstats_, if you need to override the defaults below: @@ -169,7 +169,7 @@ foreach my $i (sort keys %{$rejects}) if (-l $statefile) { die ("$statefile is a symbolic link, refusing to touch it."); -} +} open (OUT, '>', $statefile) or die "Unable to open statefile: $!\n"; print OUT "$pos:$delivered\n"; foreach my $i (sort keys %{$rejects}) @@ -178,31 +178,31 @@ foreach my $i (sort keys %{$rejects}) } close OUT; -sub parseLogfile -{ +sub parseLogfile +{ my ($fname, $start, $stop) = @_; open (LOGFILE, $fname) or die "Unable to open logfile $fname for reading: $!\n"; seek (LOGFILE, $start, 0) or die "Unable to seek to $start in $fname: $!\n"; - while (tell (LOGFILE) < $stop) + while (tell (LOGFILE) < $stop) { my $line = ; chomp ($line); if ($line =~ /$postfix\/qmgr.*from=.*size=[0-9]*/ || - $line =~ /$postfix\/smtp.* status=sent /) + $line =~ /$postfix\/smtp.* status=sent /) { $delivered++; - } + } elsif ($line =~ /$postfix\/smtpd.*reject: \S+ \S+ \S+ (\S+)/ || $line =~ /$postfix\/cleanup.* reject: (\S+)/) { $rejects->{$1}++; } } - close(LOGFILE) or warn "Error closing $fname: $!\n"; + close(LOGFILE) or warn "Error closing $fname: $!\n"; } # vim:syntax=perl diff --git a/plugins/postfix/postfix_mailvolume_multi b/plugins/postfix/postfix_mailvolume_multi index 4e7401e5..03192515 100755 --- a/plugins/postfix/postfix_mailvolume_multi +++ b/plugins/postfix/postfix_mailvolume_multi @@ -133,7 +133,6 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") if (-r "$logfile") { print "yes\n"; - exit 0; } else { @@ -145,7 +144,7 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") print "no (logfile '$logfile' not found)\n"; } } - else + else { print "no (postfix not found)\n"; } @@ -193,7 +192,7 @@ if (!$pos) # No state file present. Avoid startup spike: Do not read log # file up to now, but remember how large it is now, and next # time read from there. - + $pos = (stat $logfile)[7]; # File size foreach $syslog_name (@postfix_syslog_name) { diff --git a/plugins/postfix/postfix_stats b/plugins/postfix/postfix_stats index 89ac0fdb..5a901cf7 100755 --- a/plugins/postfix/postfix_stats +++ b/plugins/postfix/postfix_stats @@ -21,11 +21,11 @@ There is no default configuration. This is an example config for Ubuntu: env.pflogsumm pflogsumm env.logfiles contains space separated syslog logfiles, usually current log -and previous log. You can add more log files if you want, but this may +and previous log. You can add more log files if you want, but this may increase the time required for analysis. env.pflogsumm The "pflogsumm" script, can be pflogsumm.pl if it was manually -downloaded and installed, or "pflogsumm" if it was installed by a package +downloaded and installed, or "pflogsumm" if it was installed by a package manager (like apt-get). 
Use the last part of the symlink name for filtering by hostname from logfile, @@ -76,7 +76,7 @@ MAIL_LOG="${logfiles:-/var/log/mail.log /var/log/mail.log.1}" FILTER_HOSTNAME=$(basename "$0" | sed -r 's/postfix_stats_?//') # shellcheck disable=SC2154 -PFLOGSUMM="${pflogsum}" +PFLOGSUMM="${pflogsumm}" [ -z "$PFLOGSUMM" ] && PFLOGSUMM="$(which pflogsumm pflogsumm.pl | head -1)" # Fields (Array to avoid code duplication) must be space separated @@ -89,7 +89,7 @@ if [ "$1" = 'autoconf' ]; then # Check if pflogsumm exist if [ -z "${PFLOGSUMM}" ] then - echo 'no (pflogsum not found in your system)' + echo 'no (pflogsumm not found in your system)' else echo 'yes' fi @@ -136,7 +136,7 @@ else fi # Parse value from Raw result -# +# # Return digit if regex are parsed correctly # # Return U (undefined) if any error occurs diff --git a/plugins/postfix/postfwd2 b/plugins/postfix/postfwd2 index edae34a8..bd5379b0 100755 --- a/plugins/postfix/postfwd2 +++ b/plugins/postfix/postfwd2 @@ -25,12 +25,12 @@ This are perl regexp. If env.include is set and env.exclude is not, only the policy which name - matchs will be used. + matches will be used. If env.exclude is set and env.include is not, only the policy which name NOT - matchs will be used. - If both are set, a policy which name matchs the both regex will be used, a - policy which matchs only the exclude regexp will NOT be used and a policy - which match not the exclude regex will be used, even if it not matchs the + matches will be used. + If both are set, a policy which name matches the both regex will be used, a + policy which matches only the exclude regexp will NOT be used and a policy + which match not the exclude regex will be used, even if it not matches the include regexp. if none are set, all the policy will be used. @@ -156,7 +156,7 @@ if (!defined($ENV{path}) || !(-x $ENV{path})) { } munin_exit_fail() unless (defined($ENV{path}) && -x $ENV{path}); -##### I have to parse the output BEFORE config, since policy matchs are dependent of the postfwd --dumpstats output +##### I have to parse the output BEFORE config, since policy matches are dependent of the postfwd --dumpstats output open(DATA, $ENV{path}.' 
--dumpstats |') or munin_exit_fail(); my $total_requests; while(defined (my $data = )) { @@ -252,7 +252,7 @@ munin_exit_done(); # ## -### INTERNALS FONCTIONS +### INTERNALS FUNCTIONS ############################################################################### sub munin_exit_done { munin_exit(0); diff --git a/plugins/postfix/postgrey b/plugins/postfix/postgrey index e5c9ebc6..be90e875 100755 --- a/plugins/postfix/postgrey +++ b/plugins/postfix/postgrey @@ -10,8 +10,8 @@ mktempfile () { -mktemp -t -} + mktemp -t +} MAIL_LOG=${logfile:-/var/log/mail.log} STATEFILE=$MUNIN_PLUGSTATE/postgrey.offset @@ -20,11 +20,10 @@ LOGTAIL=${logtail:-`which logtail`} if [ "$1" = "autoconf" ]; then if [ -f "${MAIL_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -69,7 +68,7 @@ then fi delayed=`grep 'Recipient address rejected.*Greylisted' ${TEMP_FILE} | wc -l` - passed=`grep 'postgrey\[[0-9]*\]: delayed [0-9]* seconds:' ${TEMP_FILE} | wc -l` + passed=`grep 'postgrey\[[0-9]*\]: delayed [0-9]* seconds:' ${TEMP_FILE} | wc -l` whitelisted=`grep 'postgrey\[[0-9]*\]: whitelisted:' ${TEMP_FILE} | wc -l` /bin/rm -f $TEMP_FILE diff --git a/plugins/postfix/postgrey-new b/plugins/postfix/postgrey-new index 3a8944a1..35199b24 100755 --- a/plugins/postfix/postgrey-new +++ b/plugins/postfix/postgrey-new @@ -9,7 +9,7 @@ # output of postgrey. You can change that by setting env.logfile # # The state file is /var/lib/munin/plugin-state/postgrey-new.state# # This can be changed by setting env.statefile # -# Keep in mind to grant enough rigths in order to open the # +# Keep in mind to grant enough rights in order to open the # # logfiles etc. # # Parameters understood by this plugin # # # @@ -47,12 +47,11 @@ my $passes_white_new=0; if(defined $ARGV[0] and $ARGV[0] eq "autoconf") { if ( -f $maillog) { print "yes\n"; - exit 0; } else { print "no\n"; - exit 1 } + exit 0; } @@ -62,7 +61,7 @@ if(defined $ARGV[0] and $ARGV[0] eq "config") { print "graph_category mail\n"; print "graph_vlabel Count\n"; print "graph_scale no\n"; - + print "greylisted.label greylisted_reason_new\n"; print "greylisted.type GAUGE\n"; print "greylisted.draw AREA\n"; @@ -93,7 +92,7 @@ if( -f $statefile) { chomp($retry_old); chomp($passes_white_old); close STATE; -} +} @@ -122,7 +121,7 @@ while(my $line = ) $passes_white++; } } - + } } diff --git a/plugins/postgresql/pg__connections b/plugins/postgresql/pg__connections index f6ca4018..e19222af 100755 --- a/plugins/postgresql/pg__connections +++ b/plugins/postgresql/pg__connections @@ -1,7 +1,7 @@ #!/usr/bin/perl -w # Plugin for monitor postgres connections without DBI. # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -63,7 +63,7 @@ if (exists $ARGV[0]) { my ($max_connections) = `$sqlCommand`; my $warning = int ($max_connections * 0.7); my $critical = int ($max_connections * 0.8); - print "graph_title PostgresSQL active connections\n"; + print "graph_title PostgreSQL active connections\n"; print "graph_args -l 0 --base 1000\n"; print "graph_vlabel Connections\n"; print "graph_category db\n"; diff --git a/plugins/postgresql/pgbouncer_ b/plugins/postgresql/pgbouncer_ index 7e429671..9257886f 100755 --- a/plugins/postgresql/pgbouncer_ +++ b/plugins/postgresql/pgbouncer_ @@ -31,7 +31,7 @@ my $plugin_title = ($db_pool) ? $plugin_suffix." 
".$pool_name : $pool_name; if (!$pool_name) { print "Cannot get pool name\n"; - exit 1; + exit 1; } # command line arguments for autconf and config @@ -44,14 +44,13 @@ if (defined($ARGV[0])) if (!$dbh) { print "no\n"; - exit 1; } else { print "yes\n"; - exit 0; } $dbh->disconnect(); + exit 0; } if ($ARGV[0] eq 'config') @@ -164,7 +163,7 @@ if (defined($ARGV[0])) } # connect to data -my $dbh = DBI->connect("DBI:Pg:dbname=$db_name;host=$db_host;port=$db_port", $db_user, $db_pass) +my $dbh = DBI->connect("DBI:Pg:dbname=$db_name;host=$db_host;port=$db_port", $db_user, $db_pass) or die ("Cannot connect to database"); # go trough each set and get the data foreach my $get ('pools', 'stats') @@ -256,7 +255,7 @@ another example, where different pgbouncers (and so munin plugins) connecting to env.pgbouncer_port 6542 env.pgbouncer_host localhost env.pgbouncer_pool dbname - + [pgbouncer_webmain] env.pgbouncer_pass barfoo env.pgbouncer_user bar @@ -276,7 +275,7 @@ This graph will show the average bytes sent and received by the pgbouncer for th =head2 Average connections -This graph will show the average amount of connections to the pgbouncer for this pool +This graph will show the average amount of connections to the pgbouncer for this pool =head2 Average query time diff --git a/plugins/postgresql/pgbouncer_client_connections b/plugins/postgresql/pgbouncer_client_connections index b179636e..0ba6e0ec 100755 --- a/plugins/postgresql/pgbouncer_client_connections +++ b/plugins/postgresql/pgbouncer_client_connections @@ -46,11 +46,9 @@ if [ "$1" = "config" ]; then echo ${pool}.type GAUGE done - # If dirty config capability is enabled then fall through - # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + # If dirty config capability is enabled then fall through + # to output the data with the config information. + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output looks like this: diff --git a/plugins/postgresql/pgbouncer_maxwait b/plugins/postgresql/pgbouncer_maxwait index bae7e8d8..a257ec6d 100755 --- a/plugins/postgresql/pgbouncer_maxwait +++ b/plugins/postgresql/pgbouncer_maxwait @@ -46,11 +46,9 @@ if [ "$1" = "config" ]; then echo ${pool}.type GAUGE done - # If dirty config capability is enabled then fall through - # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + # If dirty config capability is enabled then fall through + # to output the data with the config information. + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output looks like this: diff --git a/plugins/postgresql/pgbouncer_server_connections b/plugins/postgresql/pgbouncer_server_connections index ddd95964..6b63d2a9 100755 --- a/plugins/postgresql/pgbouncer_server_connections +++ b/plugins/postgresql/pgbouncer_server_connections @@ -46,11 +46,9 @@ if [ "$1" = "config" ]; then echo ${pool}.type GAUGE done - # If dirty config capability is enabled then fall through - # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + # If dirty config capability is enabled then fall through + # to output the data with the config information. 
+ if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # Output looks like this: diff --git a/plugins/postgresql/postgres_block_read_ b/plugins/postgresql/postgres_block_read_ index 62a6f4b5..4f601e05 100755 --- a/plugins/postgresql/postgres_block_read_ +++ b/plugins/postgresql/postgres_block_read_ @@ -14,9 +14,9 @@ # http://www.postgresql.org/docs/7.4/interactive/monitoring-stats.html # for a (short) description. # -# Copyright Bjrn Ruberg 2006 +# Copyright Bjørn Ruberg 2006 # -# Licenced under GPL v2. +# Licensed under GPL v2. # # Usage: # @@ -28,8 +28,8 @@ # This should, however, be given through autoconf and suggest. # # If required, give username, password and/or Postgresql server -# host through environment variables. -# +# host through environment variables. +# # You must also activate Postgresql statistics. See # http://www.postgresql.org/docs/7.4/interactive/monitoring-stats.html # for how to enable this. Specifically, the following lines must @@ -46,11 +46,11 @@ # Config variables: # # dbhost - Which database server to use. Defaults to -# 'localhost'. +# 'localhost'. # dbuser - A Postgresql user account with read permission to # the given database. Defaults to # 'postgres'. Anyway, Munin must be told which user -# this plugin should be run as. +# this plugin should be run as. # dbpass - The corresponding password, if # applicable. Default to undef. Remember that # pg_hba.conf must be configured accordingly. @@ -76,7 +76,7 @@ if (exists $ARGV[0]) { # Check for DBD::Pg if (! eval "require DBD::Pg;") { print "no (DBD::Pg not found)"; - exit 1; + exit 0; } # Then we try to detect Postgres presence by connecting to # 'template1'. @@ -88,7 +88,7 @@ if (exists $ARGV[0]) { exit 0; } else { print "no (Can't connect to given host, please check environment settings)\n"; - exit 1; + exit 0; } } elsif ($ARGV[0] eq 'debug') { # Set debug flag @@ -128,7 +128,7 @@ if ($configure) { graph_title Postgres data reads from $dbname graph_args --base 1000 graph_vlabel Blocks read per \${graph_period} -graph_category db +graph_category db graph_info Shows number of blocks read from disk and from memory from_disk.label Read from disk from_disk.info Read from disk diff --git a/plugins/postgresql/postgres_queries2_ b/plugins/postgresql/postgres_queries2_ index 5c8185f2..dab14abb 100755 --- a/plugins/postgresql/postgres_queries2_ +++ b/plugins/postgresql/postgres_queries2_ @@ -14,7 +14,7 @@ cat << EOF graph_title Postgres queries2 on $db graph_args --base 1000 graph_vlabel Queries per \${graph_period} -graph_category db +graph_category db graph_info Shows number of select, insert, update and delete queries sel_seq.label s_selects sel_seq.info Sequential selects on all tables @@ -47,7 +47,7 @@ deletes.min 0 EOF else psql -At $db << EOF -select +select 'sel_seq.value ' || SUM(seq_scan) || E'\n' || 'sel_seq_rows.value ' || SUM(seq_tup_read) || E'\n' || 'sel_idx.value ' || SUM(idx_scan) || E'\n' || @@ -57,7 +57,7 @@ select 'deletes.value ' || SUM(n_tup_del) from pg_stat_all_tables; EOF - + # my $sql = "SELECT SUM(seq_scan),SUM(seq_tup_read), "; # $sql .= "SUM(idx_scan),SUM(idx_tup_fetch), "; # $sql .= "SUM(n_tup_ins),SUM(n_tup_upd),SUM(n_tup_del) "; diff --git a/plugins/postgresql/postgres_queries3_ b/plugins/postgresql/postgres_queries3_ index 6d4b873e..0946a337 100755 --- a/plugins/postgresql/postgres_queries3_ +++ b/plugins/postgresql/postgres_queries3_ @@ -1,6 +1,6 @@ #!/usr/bin/env perl -# postgres_queries3: see stats on number of rows +# postgres_queries3: see stats on number of rows # read, 
inserted, updated and deleted on a per table basis # # Author: @@ -12,7 +12,7 @@ # Usage: # Place in /etc/munin/plugins/ (or link it there using ln -s) # Place table names after '_' and delimit with '-' -# EX: postgres_queries3_table1-table2-table3 +# EX: postgres_queries3_db1-db2-db3 # # Parameters: # config (required) @@ -27,10 +27,10 @@ # On debian systems install libipc-run3-perl # # Log info: -# 20140701 - Initial -# 20140924 - +# 20140701 - Initial +# 20140924 - # -ignore internal pg tables -# -missing stuff in config +# -missing stuff in config use strict; @@ -40,7 +40,7 @@ use IPC::Run3 qw( run3 ); my %values; my $query = \<new( vlabel => 'Size', paramdatabase => 1, basequery => "SELECT CASE WHEN relkind = 'r' OR relkind = 't' THEN 'db_detail_data' WHEN relkind = 'i' THEN 'db_detail_index' WHEN relkind = 'v' THEN 'db_detail_view' WHEN relkind = 'S' THEN 'db_detail_sequence' ELSE 'db_detail_other' END AS state, - SUM(relpages::bigint * 8 * 1024) AS size + SUM(relpages::bigint * 8 * 1024) AS size FROM pg_class pg, pg_namespace pgn WHERE pg.relnamespace = pgn.oid AND pgn.nspname NOT IN ('information_schema', 'pg_catalog') GROUP BY state", configquery => [ "VALUES ('db_detail_data','Data size'),('db_detail_index','Index size'),('db_detail_sequence','Sequence size'),('db_detail_view','View size'),('db_detail_other','Other size')", diff --git a/plugins/postgresql/postgres_space_ b/plugins/postgresql/postgres_space_ index ed387631..46cd2b5f 100755 --- a/plugins/postgresql/postgres_space_ +++ b/plugins/postgresql/postgres_space_ @@ -1,9 +1,9 @@ #!/usr/bin/perl # (Temporary) source: http://munin.projects.linpro.no/ticket/63 -# Written by Bjrn Ruberg (bjorn@linpro.no) 2006 +# Written by Bjørn Ruberg (bjorn@linpro.no) 2006 # Rewritten by Moses Moore 2006-04-08 moc.iazom@sesom -# Licenced under GPL +# Licensed under GPL # Magic markers #%# family=auto @@ -31,7 +31,7 @@ if (exists $ARGV[0]) { # Check for DBD::Pg if (! eval "require DBD::Pg;") { print "no (DBD::Pg not found)"; - exit 1; + exit 0; } # Then we try to detect Postgres presence by connecting to # 'template1'. @@ -43,7 +43,7 @@ if (exists $ARGV[0]) { exit 0; } else { print "no (Can't connect to given host, please check environment settings)\n"; - exit 1; + exit 0; } } elsif ($ARGV[0] and $ARGV[0] eq 'debug') { # Set config flag @@ -105,17 +105,17 @@ _EOM my $metadatabase_pages = 0; my $metadatabase_indexes = 0; my @names = $dbh->tables; - + # Find relfilenode and relpages from the given table my $q_ind = "SELECT relkind, relfilenode, relpages FROM pg_class WHERE relname = ? 
UNION SELECT relkind, relfilenode, relpages FROM pg_class - WHERE relfilenode IN (SELECT indexrelid FROM pg_index + WHERE relfilenode IN (SELECT indexrelid FROM pg_index WHERE indrelid IN (SELECT relfilenode FROM pg_class WHERE relname = ?))"; my $sth = $dbh->prepare ($q_ind) or die $dbh->errstr; - + # Iterate over the tables in the database foreach my $table (@names) { my $meta = 1; @@ -127,7 +127,7 @@ _EOM # "public" tables are the user data $meta = 0 if $table =~ /^public\./; $table =~ s/^.*\.//; - + # Call the query with $table twice for each side of the UNION $sth->execute ($table, $table) or die $dbh->errstr; while (my ($relkind, $relfilenode, $relpages) = $sth->fetchrow_array) { @@ -139,8 +139,8 @@ _EOM $metatable_indexes += $relpages if $meta == 1; } # Define the query - my $q2 = "SELECT SUM(relpages) - FROM pg_class + my $q2 = "SELECT SUM(relpages) + FROM pg_class WHERE relname IN (?, ?)"; my $sth2 = $dbh->prepare ($q2); $sth2->execute ("pg_toast_${relfilenode}", @@ -159,7 +159,7 @@ _EOM $database_indexes += $table_indexes; $metadatabase_pages += $metatable_pages; $metadatabase_indexes += $metatable_indexes; - } + } $sth->finish; $dbh->disconnect; print "size\.value " . $database_pages * 8192 . "\n"; diff --git a/plugins/postgresql/postgres_tuplesratio_ b/plugins/postgresql/postgres_tuplesratio_ old mode 100644 new mode 100755 diff --git a/plugins/postgresql/postgresql_active_backends_by_database b/plugins/postgresql/postgresql_active_backends_by_database index e4a53382..78a34745 100755 --- a/plugins/postgresql/postgresql_active_backends_by_database +++ b/plugins/postgresql/postgresql_active_backends_by_database @@ -73,9 +73,7 @@ if [ "$1" = "config" ]; then # If dirty config capability is enabled then fall through # to output the data with the config information. 
- if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi while read pool sep backends junk diff --git a/plugins/postgresql/postgresql_tablespace_size b/plugins/postgresql/postgresql_tablespace_size index cae81458..044969ce 100755 --- a/plugins/postgresql/postgresql_tablespace_size +++ b/plugins/postgresql/postgresql_tablespace_size @@ -23,6 +23,7 @@ dbserver='localhost' dbuser='postgres' +dbpass='' if [ "$1" = "config" ]; then echo 'graph_args --base 1024 --lower-limit 0' @@ -31,7 +32,7 @@ if [ "$1" = "config" ]; then echo 'graph_title PostgreSQL Tablespace Sizes' echo 'graph_vlabel Size (bytes)' - psql -h ${dbserver} -U ${dbuser} -tc "SELECT spcname FROM pg_tablespace ORDER BY 1;" | while read name + PGPASSWORD="${dbpass}" psql -h ${dbserver} -U ${dbuser} -tc "SELECT spcname FROM pg_tablespace ORDER BY 1;" | while read name do test -z "${name}" && continue echo ${name}'.label '${name} @@ -47,7 +48,7 @@ if [ "$1" = "config" ]; then exit 0 fi -psql -h ${dbserver} -U ${dbuser} -tc "SELECT spcname, PG_TABLESPACE_SIZE(oid) FROM pg_tablespace ORDER BY 1;" | while read name sep num +PGPASSWORD="${dbpass}" psql -h ${dbserver} -U ${dbuser} -tc "SELECT spcname, PG_TABLESPACE_SIZE(oid) FROM pg_tablespace ORDER BY 1;" | while read name sep num do test -z "${name}" && continue echo ${name}'.value '${num} diff --git a/plugins/postgresql/postgresql_transactions b/plugins/postgresql/postgresql_transactions index f13740a5..894a9aab 100755 --- a/plugins/postgresql/postgresql_transactions +++ b/plugins/postgresql/postgresql_transactions @@ -1,43 +1,71 @@ -#!/bin/bash -# -# Plugin to monitor PostgreSQL Commits and Rollbacks in Transactions -# -# Author: -# Guilherme Augusto da Rocha Silva -# -# Created: -# 9th of november 2007 -# -# Usage: -# Place in /etc/munin/plugins/ (or link it there using ln -s) -# -# Parameters: -# config (required) -# -# General info: -# Require permission for database access and read (no writes are processed). -# Recommended user is PostgreSQL database owner (default: postgres). -# -# Log info: -# +#!/bin/sh -dbserver='localhost' -dbuser='postgres' +: <<=cut -if [ "$1" = "config" ]; then +=head1 NAME + +postgresql_transactions - Plugin to monitor PostgreSQL Commits and Rollbacks in Transactions + + +=head1 USAGE + +Usage: + Place in /etc/munin/plugins/ (or link it there using ln -s) + +General info: + Requires permission for database read access (no writes are processed). + Recommended user is PostgreSQL database owner (default: postgres). + + +=head1 CONFIGURATION + +The following configuration directives may be placed below /etc/munin/plugin-conf.d/ (optional): + + [postgresql_transactions] + user postgres + env.dbuser postgres + env.dbhost localhost + + +=head1 AUTHOR + + Guilherme Augusto da Rocha Silva + + Copyright (C) 2007 Guilherme Augusto da Rocha Silva + +=cut + +dbhost=${dbhost:-localhost} +dbuser=${dbuser:-postgres} + + +do_config() { echo 'graph_args --base 1000 --lower-limit 0' echo 'graph_category db' echo 'graph_info Shows summarized commits and rollbacks in transactions on the PostgreSQL Server.' echo 'graph_title PostgreSQL Transactions' - echo 'graph_vlabel Number of Commits and Rollbacks' + echo 'graph_vlabel Commits and Rollbacks per second' echo 'commits.label commits' echo 'commits.min 0' - echo 'commits.info Number of transaction commits.' + echo 'commits.type DERIVE' + echo 'commits.info Number of transaction commits per second.' 
echo 'rollbacks.label rollbacks' echo 'rollbacks.min 0' - echo 'rollbacks.info Number of transaction rollbacks.' - exit 0 + echo 'rollbacks.type DERIVE' + echo 'rollbacks.info Number of transaction rollbacks per second.' +} + + +do_fetch() { + psql -h "$dbhost" -U "$dbuser" -tc "SELECT 'commits.value '||SUM(xact_commit)::TEXT||E'\\nrollbacks.value '||SUM(xact_rollback)::TEXT FROM pg_stat_database;" --no-align +} + + +if [ "$1" = "config" ]; then + do_config + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then do_fetch; fi +else + do_fetch fi -psql -h ${dbserver} -U ${dbuser} -tc "SELECT 'commits.value '||SUM(xact_commit)::TEXT||E'\nrollbacks.value '||SUM(xact_rollback)::TEXT FROM pg_stat_database;" --no-align diff --git a/plugins/postgresql/slony_lag_events_ b/plugins/postgresql/slony_lag_events_ index 13d88f83..f85d6272 100755 --- a/plugins/postgresql/slony_lag_events_ +++ b/plugins/postgresql/slony_lag_events_ @@ -14,7 +14,7 @@ # st_last_received_event_ts: the timestamp on the sl_event in that pair # st_lag_num_events: difference between st_last_event and st_last_received # st_lag_time: difference between st_last_event_ts and st_last_received_ts -# +# # # Configuration variables: # @@ -54,7 +54,7 @@ if [ "$1" = "config" ]; then echo "graph_title Slony lag events for ${PGDATABASE}" echo 'graph_vlabel event' - psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) .*$', '\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host + psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) .*$', '\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host do test -z "${node_id}" && continue echo "${node_id}.label ${host}" @@ -68,7 +68,7 @@ if [ "$1" = "config" ]; then fi psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT st_received, st_lag_num_events FROM ${PGSCHEMA}.sl_status ORDER BY 1;" | while read node_id sep event -do +do test -z "${node_id}" && continue echo "${node_id}.value ${event}" done diff --git a/plugins/postgresql/slony_lag_time b/plugins/postgresql/slony_lag_time index 3ba34422..ddbcd99f 100755 --- a/plugins/postgresql/slony_lag_time +++ b/plugins/postgresql/slony_lag_time @@ -1,6 +1,6 @@ #!/bin/bash # -# plugin to monitor difference between st_last_event_ts and st_last_received_ts +# plugin to monitor difference between st_last_event_ts and st_last_received_ts # in sl_status table (based on slony_ and slony_lag_) # # http://blog.endpoint.com/2009/07/slony-slstatus-and-diagnosing.html @@ -14,7 +14,7 @@ # st_last_received_event_ts: the timestamp on the sl_event in that pair # st_lag_num_events: difference between st_last_event and st_last_received # st_lag_time: difference between st_last_event_ts and st_last_received_ts -# +# # # Configuration variables: # @@ -54,7 +54,7 @@ if [ "$1" = "config" ]; then echo "graph_title Slony lag time for ${PGDATABASE}" echo "graph_vlabel \${graph_period}" - psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) 
.*$', '\\\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host + psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) .*$', '\\\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host do test -z "${node_id}" && continue echo "${node_id}.label ${host}" @@ -68,7 +68,7 @@ if [ "$1" = "config" ]; then fi psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT st_received, extract(epoch FROM st_lag_time)::integer FROM ${PGSCHEMA}.sl_status ORDER BY 1;" | while read node_id sep time -do +do test -z "${node_id}" && continue echo "${node_id}.value ${time}" done diff --git a/plugins/postgresql/slony_lag_time_ b/plugins/postgresql/slony_lag_time_ index c671f657..0ab90119 100755 --- a/plugins/postgresql/slony_lag_time_ +++ b/plugins/postgresql/slony_lag_time_ @@ -1,6 +1,6 @@ #!/bin/bash # -# plugin to monitor difference between st_last_event_ts and st_last_received_ts +# plugin to monitor difference between st_last_event_ts and st_last_received_ts # in sl_status table (based on slony_ and slony_lag_) # # http://blog.endpoint.com/2009/07/slony-slstatus-and-diagnosing.html @@ -14,7 +14,7 @@ # st_last_received_event_ts: the timestamp on the sl_event in that pair # st_lag_num_events: difference between st_last_event and st_last_received # st_lag_time: difference between st_last_event_ts and st_last_received_ts -# +# # # Configuration variables: # @@ -54,7 +54,7 @@ if [ "$1" = "config" ]; then echo "graph_title Slony lag time for ${PGDATABASE}" echo "graph_vlabel \${graph_period}" - psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) .*$', '\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host + psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT no_id,regexp_replace(pa_conninfo, '.*host=(.*?) .*$', '\\1') FROM ${PGSCHEMA}.sl_node JOIN ${PGSCHEMA}.sl_path ON (pa_server=no_id) WHERE pa_client=${PGSCHEMA}.getlocalnodeid('${PGSCHEMA}'::name);" | while read node_id sep host do test -z "${node_id}" && continue echo "${node_id}.label ${host}" @@ -68,7 +68,7 @@ if [ "$1" = "config" ]; then fi psql -h ${PGHOST} -d ${PGDATABASE} -U ${PGUSER} -tc "SELECT st_received, extract(epoch FROM st_lag_time)::integer FROM ${PGSCHEMA}.sl_status ORDER BY 1;" | while read node_id sep time -do +do test -z "${node_id}" && continue echo "${node_id}.value ${time}" done diff --git a/plugins/power/apcupsd_pct b/plugins/power/apcupsd_pct index dec45cfb..2e4452aa 100755 --- a/plugins/power/apcupsd_pct +++ b/plugins/power/apcupsd_pct @@ -208,14 +208,14 @@ munin plugin to monitor APC UPS via apcupsd by apcaccess. 
=head1 INSTALLATION cp apcupsd_pct $MUNIN_LIBDIR/plugsin/ - + cd YOUR_MUNIN_PLUGINS_DIR (make symbolic links different name) ln -s $MUNIN_LIBDIR/plugsin/apcupsd_pct apcupsd_pct ln -s $MUNIN_LIBDIR/plugsin/apcupsd_pct apcupsd_volt ln -s $MUNIN_LIBDIR/plugsin/apcupsd_pct apcupsd_time ln -s $MUNIN_LIBDIR/plugsin/apcupsd_pwr apcupsd_pwr - + restart munin-node =head1 REPOSITORY diff --git a/plugins/power/apcupsd_ww b/plugins/power/apcupsd_ww index b50067b3..f144537f 100755 --- a/plugins/power/apcupsd_ww +++ b/plugins/power/apcupsd_ww @@ -1,15 +1,15 @@ #!/usr/bin/perl -w -# +# # Plugin to monitor apcupsd via apcaccess # # Version 1.3 -# +# # Copyright (C) 2005-2008 Behan Webster -# Licenced under GPL 2.0 +# Licensed under GPL 2.0 # # Written by: Behan Webster # German translation by: Bianco Veigel -# +# #%# family=auto #%# capabilities=autoconf @@ -28,7 +28,7 @@ my $language = $ENV{LANG} || 'en'; # # UPSNAME : Elfhild # MODEL : SMART-UPS 1400 RM XL -# STATUS : ONLINE +# STATUS : ONLINE # LINEV : 123.5 Volts # LOADPCT : 24.9 Percent Load Capacity # BCHARGE : 100.0 Percent @@ -178,11 +178,10 @@ if (-f $config) { if (defined $ARGV[0] && $ARGV[0] =~ /autoconf|detect/) { if (-x $apcaccess) { print "yes\n"; - exit 0; } else { print "no (apcaccess not found)\n"; - exit 1; } + exit 0; } # Read info from apcupsd using apcaccess diff --git a/plugins/power/currentcost b/plugins/power/currentcost index 956acfc8..ea34f531 100755 --- a/plugins/power/currentcost +++ b/plugins/power/currentcost @@ -58,7 +58,7 @@ The configuration can be broken down into the following subsections: =item env.device -Specfies the device node where the CurrentCost monitor can be found. You may find it useful to use a udev rule to symlink this somewhere permanent. +Specifies the device node where the CurrentCost monitor can be found. You may find it useful to use a udev rule to symlink this somewhere permanent. =item env.baud @@ -100,7 +100,7 @@ The time period for which C applies. This should be of the form C =item env.standingcharge -The standing charge in hundreths of a C per month. If you do not have a standing charge, set this to 0. +The standing charge in hundredths of a C per month. If you do not have a standing charge, set this to 0. =item env.metertype @@ -314,8 +314,8 @@ As per L 00014 days since birth
14 the time - 07 - 07 + 07 + 07 CC02 name of this device @@ -336,71 +336,71 @@ As per L 000.0 total Kwh used in 2 hour blocks - 000.1 - 000.1 - 000.0 - 000.0 - 000.0 - 000.0 - 000.1 - 000.1 - 000.1 - 000.1 - 000.0 - 000.0 + 000.1 + 000.1 + 000.0 + 000.0 + 000.0 + 000.0 + 000.1 + 000.1 + 000.1 + 000.1 + 000.0 + 000.0 0000 total Kwh used per day(s) - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 0000 total Kwh used per month(s) - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 - 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 + 0000 0000000 total Kwh used per year(s) - 0000000 - 0000000 - 0000000 + 0000000 + 0000000 + 0000000 @@ -427,7 +427,7 @@ For full definition, see L 00000 end of message - + =cut sub collect_cc128_data { diff --git a/plugins/power/eatonups_ b/plugins/power/eatonups_ old mode 100644 new mode 100755 diff --git a/plugins/power/nut b/plugins/power/nut index 0bd56048..31e34002 100755 --- a/plugins/power/nut +++ b/plugins/power/nut @@ -41,17 +41,16 @@ load=$(upsc $UPS | grep ups.load: | cut -d" " -f2) charge=$(upsc $UPS | grep battery.charge: | cut -d" " -f2) } -# Munin routines +# Munin routines case "$1" in autoconf) grep ^MONITOR < /etc/nut/upsmon.conf &> /dev/null if [[ "$?" = "0" ]]; then echo yes - exit 0 else echo "no (NUT not installed or no UPS info available in /etc/nut/upsmon.conf)" - exit 1 fi + exit 0 ;; config) FETCH_DATA @@ -78,8 +77,8 @@ EOM grep ^MONITOR < /etc/nut/upsmon.conf | cut -d" " -f2 | sed 's|@|_AT_|g' exit 0 ;; - *) - + *) + FETCH_DATA # Print data for Munin cat << EOM diff --git a/plugins/power/nutups2_ b/plugins/power/nutups2_ index 9730d457..73d6881f 100755 --- a/plugins/power/nutups2_ +++ b/plugins/power/nutups2_ @@ -239,7 +239,7 @@ sub common_config { print $id . ".label " . $field . "\n"; print $id . ".type GAUGE\n"; - # Draw nominal values a litle thinner + # Draw nominal values a little thinner print $id . ".draw LINE1\n" if $nominal; print_range_warning($id, $key, $values); diff --git a/plugins/power/snmp__ipoman_ b/plugins/power/snmp__ipoman_ index 9524460b..011852e1 100755 --- a/plugins/power/snmp__ipoman_ +++ b/plugins/power/snmp__ipoman_ @@ -3,8 +3,8 @@ # What is snmp__ipoman_ # ---------------------- # snmp__ipoman is a munin plugin written for the Ingrasys IpomanII 1202 -# Power Distribution Unit. It should work on any PDU conforming to -# the IPOMANII-MIB. +# Power Distribution Unit. It should work on any PDU conforming to +# the IPOMANII-MIB. # # How do I use it # --------------- @@ -12,7 +12,7 @@ # how: # # 1. Copy snmp__ipoman_ to the directory where all your munin plugins -# reside, for example /usr/share/munin/plugins. +# reside, for example /usr/share/munin/plugins. # # 2. Make the following symlinks to snmp__ipoman_ in that same directory # @@ -40,7 +40,7 @@ # output current and power usage for all available outlets of the # ipoman, and current, power usage and voltage/frequency on all inlets # of the ipoman. -# +# # 5. Restart munin-node # # 6. Make an entry in your munin server's munin.conf: @@ -48,7 +48,7 @@ # [] # address
# use_node_name no -# +# # 7. Done. # # Copyright (C) 2009 Rien Broekstra @@ -67,21 +67,21 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Munin plugin to monitor power consumption and current of the sockets of an -# Ingrasys IpomanII 1202 Power Distribution Unit, or any power distribution +# Munin plugin to monitor power consumption and current of the sockets of an +# Ingrasys IpomanII 1202 Power Distribution Unit, or any power distribution # unit that conforms to IPOMANII-MIB via SNMP. # # Parameters: # -# config -# snmpconf +# config +# snmpconf # # Relevant OID's under .iso.org.dod.internet.private.enterprises.ingrasys.product.pduAgent.iPoManII # .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletNumber.0 # .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletStatusTable.ipmDeviceOutletStatusEntry.outletStatusIndex.1 # .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletStatusTable.ipmDeviceOutletStatusEntry.outletStatusCurrent.1 # .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletStatusTable.ipmDeviceOutletStatusEntry.outletStatusKwatt.1 -# .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletStatusTable.ipmDeviceOutletStatusEntry.outletStatusWH.1 +# .ipmObjects.ipmDevice.ipmDeviceOutlet.ipmDeviceOutletStatusTable.ipmDeviceOutletStatusEntry.outletStatusWH.1 # # Version 0.1, Aug 4, 2009 # @@ -134,15 +134,15 @@ if (!defined($graphtype)) { # # The relevant OID's on the IPOMAN # -my $oid_inletnumber = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.1.0"; -my $oid_inletindextable = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.2.1.1."; +my $oid_inletnumber = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.1.0"; +my $oid_inletindextable = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.2.1.1."; my $oid_inletvoltage = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.3.1.2."; my $oid_inletcurrent = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.3.1.3."; my $oid_inletfrequency = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.3.1.4."; my $oid_inletenergy = ".1.3.6.1.4.1.2468.1.4.2.1.3.1.3.1.5."; -my $oid_outletnumber = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.1.0"; -my $oid_outletindextable = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.3.1.1."; +my $oid_outletnumber = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.1.0"; +my $oid_outletindextable = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.3.1.1."; my $oid_outletdescription = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.2.1.2."; my $oid_outletcurrent = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.3.1.3."; my $oid_outletenergy = ".1.3.6.1.4.1.2468.1.4.2.1.3.2.3.1.4."; @@ -219,7 +219,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows the tension and frequency to inlet $socketnumber on the Power Distribution Unit\n"; - + print "voltage.label Tension (V)\n"; print "voltage.draw LINE2\n"; print "voltage.type GAUGE\n"; @@ -235,7 +235,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows the delivered current to inlet $socketnumber on the Power Distribution Unit\n"; - + print "current.label Current (A)\n"; print "current.draw AREA\n"; print "current.type GAUGE\n"; @@ -247,43 +247,43 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows the delivered apparent and real power to inlet $socketnumber of the Power Distribution Unit\n"; - + print "apparentpower.label Apparent power (kVA)\n"; print "apparentpower.draw LINE3\n"; 
print "apparentpower.type GAUGE\n"; - + print "realpower.label Real power (kW)\n"; print "realpower.draw AREA\n"; print "realpower.type COUNTER\n"; - + exit 0; } elsif ($graphtype eq "outletcurrent") { print "graph_title Outlet $socketnumber current\n"; - + print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows the delivered current to outlet $socketnumber of the Power Distribution Unit\n"; - + print "current.label Delivered current (A)\n"; print "current.draw AREA\n"; print "current.type GAUGE\n"; } elsif ($graphtype eq "outletpower") { print "graph_title Outlet $socketnumber power\n"; - + print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows the delivered apparent and real power to outlet $socketnumber of the Power Distribution Unit\n"; - + print "apparentpower.label Apparent power (kVA)\n"; print "apparentpower.draw LINE3\n"; print "apparentpower.type GAUGE\n"; - + print "realpower.label Real power (kW)\n"; print "realpower.draw AREA\n"; print "realpower.type COUNTER\n"; - + exit 0; } exit 0; @@ -342,7 +342,7 @@ elsif ($graphtype eq "inletcurrent") { } elsif ($graphtype eq "inletpower") { my ($current, $energy, $voltage, $apparentpower); - + if (defined ($response = $session->get_request($oid_inletcurrent.$socketnumber))) { $current = $response->{$oid_inletcurrent.$socketnumber}; } @@ -402,7 +402,7 @@ elsif ($graphtype eq "outletcurrent") { } elsif ($graphtype eq "outletpower") { my ($current, $energy, $voltage, $apparentpower); - + if (defined ($response = $session->get_request($oid_outletcurrent.$socketnumber))) { $current = $response->{$oid_outletcurrent.$socketnumber}; } diff --git a/plugins/power/snmp__sentry b/plugins/power/snmp__sentry index 4631b0c4..32ddece0 100755 --- a/plugins/power/snmp__sentry +++ b/plugins/power/snmp__sentry @@ -31,9 +31,9 @@ Sentry3-MIB::infeedID.1.3 = STRING: AC Sentry3-MIB::infeedName.1.1 = STRING: Master_X Sentry3-MIB::infeedName.1.2 = STRING: Master_Y Sentry3-MIB::infeedName.1.3 = STRING: Master_Z -Sentry3-MIB::infeedCapabilities.1.1 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) -Sentry3-MIB::infeedCapabilities.1.2 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) -Sentry3-MIB::infeedCapabilities.1.3 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) +Sentry3-MIB::infeedCapabilities.1.1 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) +Sentry3-MIB::infeedCapabilities.1.2 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) +Sentry3-MIB::infeedCapabilities.1.3 = BITS: C6 00 onSense(0) loadSense(1) voltageSense(5) powerSense(6) Sentry3-MIB::infeedStatus.1.1 = INTEGER: on(1) Sentry3-MIB::infeedStatus.1.2 = INTEGER: on(1) Sentry3-MIB::infeedStatus.1.3 = INTEGER: on(1) @@ -153,7 +153,7 @@ graph_info This shows the amperage drawn on your PDU. 
Per NEC, a PDU should not my $infeedName = $sentry_h->{$k}->{'infeedName'}; my $critical = ($sentry_h->{$k}->{'infeedCapacity'})*.9; # 90% of capacity my $warning = $sentry_h->{$k}->{'infeedLoadHighThresh'}; # 80% of capacity - + print "$infeedName.critical $critical\n"; print "$infeedName.draw LINE1\n"; print "$infeedName.label $infeedName\n"; @@ -175,7 +175,7 @@ graph_info Power factor represents the efficiency of the components connected to foreach my $k ( keys %{$sentry_h} ) { my $infeedName = $sentry_h->{$k}->{'infeedName'}; - + print "$infeedName.draw LINE1\n"; print "$infeedName.label $infeedName\n"; print "$infeedName.type GAUGE\n"; @@ -194,7 +194,7 @@ graph_info Crest factor relates the peak value of a signal to its root mean squa foreach my $k ( keys %{$sentry_h} ) { my $infeedName = $sentry_h->{$k}->{'infeedName'}; - + print "$infeedName.draw LINE1\n"; print "$infeedName.label $infeedName\n"; print "$infeedName.type GAUGE\n"; diff --git a/plugins/power/upsmonpro_ b/plugins/power/upsmonpro_ old mode 100644 new mode 100755 index 0008e4e2..7b7fe428 --- a/plugins/power/upsmonpro_ +++ b/plugins/power/upsmonpro_ @@ -121,7 +121,7 @@ END sub run_config_load { print <<'END'; graph_title UPS Battery Load/Capacity -graph_vlabel precent% +graph_vlabel percent (%) graph_scale no graph_category sensors battery_load.label battery_load diff --git a/plugins/power5/consumed_cpu_cycles b/plugins/power5/consumed_cpu_cycles index 950c31a6..070f04bd 100755 --- a/plugins/power5/consumed_cpu_cycles +++ b/plugins/power5/consumed_cpu_cycles @@ -42,9 +42,9 @@ if [ "$1" = "config" ]; then echo 'graph_category cpu' echo 'graph_vlabel CPU cycles' echo 'graph_info This graph shows the CPU cycles on an uncapped LPAR' - + echo 'cpuCycles.label used CPU cycles' - + exit 0 fi diff --git a/plugins/power5/cpu_in_lpar b/plugins/power5/cpu_in_lpar index aab20829..9d91f63a 100755 --- a/plugins/power5/cpu_in_lpar +++ b/plugins/power5/cpu_in_lpar @@ -41,10 +41,10 @@ if [ "$1" = "config" ]; then echo 'graph_category cpu' echo 'graph_vlabel CPUs in LPAR' echo 'graph_info This graph shows potential and active processors for a LPAR.' - + echo 'potentialLparCpu.label partition potential processors' echo 'activeLparCpu.label partition active processors' - + exit 0 fi diff --git a/plugins/power5/weight_of_a_lpar b/plugins/power5/weight_of_a_lpar index 2d2e2b05..fbf32209 100755 --- a/plugins/power5/weight_of_a_lpar +++ b/plugins/power5/weight_of_a_lpar @@ -43,9 +43,9 @@ if [ "$1" = "config" ]; then echo 'graph_info This graph shows the weight of an uncapped LPAR' echo 'weight.min 0' echo 'weight.max 255' - + echo 'LparWeight.label weight' - + exit 0 fi diff --git a/plugins/powermta/pmta_ b/plugins/powermta/pmta_ index cace6ce9..6c1e70db 100755 --- a/plugins/powermta/pmta_ +++ b/plugins/powermta/pmta_ @@ -12,21 +12,21 @@ # Copyright (c) 2011, emarsys eMarketing Systems AG # All rights reserved. # -# Redistribution and use in source and binary forms, with or without modification, are permitted provided that +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that # the following conditions are met: # -# Redistributions of source code must retain the above copyright notice, this list of conditions and the -# following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list -# of conditions and the following disclaimer in the documentation and/or other materials provided with the -# distribution. 
Neither the name of the emarsys eMarketing Systems AG nor the names of its contributors may +# Redistributions of source code must retain the above copyright notice, this list of conditions and the +# following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other materials provided with the +# distribution. Neither the name of the emarsys eMarketing Systems AG nor the names of its contributors may # be used to endorse or promote products derived from this software without specific prior written permission. # -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # @@ -53,7 +53,7 @@ # lrwxrwxrwx 1 root root 5 2011-11-15 16:31 pmta_top_domains -> pmta_ # # o alternative multi-host configuration: -# you can also link the pmta_ script to various hosts and configure munin to respond +# you can also link the pmta_ script to various hosts and configure munin to respond # to multiple REMOTEHOSTs, just link pmta_ in this schema and configure munin # accordingly - DO NOT USE DOTS IN YOUR IDENTIFIER VARIABLE, USE UNDERSCORE INSTEAD! # for example: @@ -63,7 +63,7 @@ # lrwxrwxrwx 1 root root 5 2011-11-18 12:18 mailhost4_domain_com_pmta_top_domains -> pmta_ # [...] # -# conf.: +# conf.: # [mailhost1_domain_com_pmta*] # env.REMOTEHOST somehostORip # env.PORT portnumber @@ -91,18 +91,18 @@ fi fetch_xml() { # o arg1 specifies xpath or false # o arg2 specifies sed/regex or false - # o arg3 specifies category ('status', 'domains',..) or false + # o arg3 specifies category ('status', 'domains',..) 
or false HTTPQUERY="GET /${3}?format=xml HTTP/1.1\n\n" REMOTE=`echo ${HTTPQUERY} | nc -q 1 -w 5 ${REMOTEHOST} ${PORT} | tail -1` XML_DATA=${REMOTE} - SED="s/.*<$2>\([^<]*\)<\/$2>.*/\1/p" - + SED="s/.*<$2>\([^<]*\)<\/$2>.*/\1/p" + if [ "$3" = "status" ]; then RET=`echo $XML_DATA | xpath -q -e $1 | sed -n $SED | cut -f1 -d '.'` elif [ "$3" = "domains" ]; then RET=`echo $XML_DATA | xpath -q -e '//domain/*[self::name or self::rcp]' | sed 's/<[^>]*>//g' | sed 's/\./_/g'` - fi - + fi + if [ "$RET" ]; then echo $RET return 0 @@ -122,15 +122,15 @@ case $NAME_SELF in CONF_TITLE="powermta ${WHAT}bound traffic" CONF_LABEL="traffic_${WHAT}" CONF_SELF="traffic_${WHAT}bound" - - if [ -z $1 ]; then - GET=`fetch_xml //traffic//lastMin/${WHAT} kb status` + + if [ -z $1 ]; then + GET=`fetch_xml //traffic//lastMin/${WHAT} kb status` RETURN=`echo $GET / 1024 | bc` - RCPT=`fetch_xml //traffic//lastMin/${WHAT} rcp status` - MSGS=`fetch_xml //traffic//lastMin/${WHAT} msg status` + RCPT=`fetch_xml //traffic//lastMin/${WHAT} rcp status` + MSGS=`fetch_xml //traffic//lastMin/${WHAT} msg status` echo "megabytes.value $RETURN" echo "recipients.value $RCPT" - echo "messages.value $MSGS" + echo "messages.value $MSGS" exit 0 elif [ $1 = config ]; then echo "megabytes.label megabytes" @@ -154,10 +154,10 @@ case $NAME_SELF in CONF_TITLE=`echo "powermta ${WHAT}bound connections" | tr [:upper:] [:lower:]` CONF_LABEL=`echo "conn_${WHAT}" | tr [:upper:] [:lower:]` CONF_SELF=`echo "connections_${WHAT}bound" | tr [:upper:] [:lower:]` - if [ -z $1 ]; then - GET=`fetch_xml //conn//smtp${WHAT} cur status` + if [ -z $1 ]; then + GET=`fetch_xml //conn//smtp${WHAT} cur status` echo "${CONF_SELF}.value $GET" - exit 0 + exit 0 elif [ $1 = config ]; then echo "${CONF_SELF}.label ${WHAT}bound connections" echo "${CONF_SELF}.draw LINE1" @@ -165,7 +165,7 @@ case $NAME_SELF in AUTOLABEL="no" fi ;; - *pmta_queue_domains | *pmta_queue_recipients | *pmta_queue_megabytes) + *pmta_queue_domains | *pmta_queue_recipients | *pmta_queue_megabytes) if [ `expr match "$NAME_SELF" ".*pmta_queue_domains"` != 0 ]; then WHAT="domains" elif [ `expr match "$NAME_SELF" ".*pmta_queue_recipients"` != 0 ]; then @@ -175,28 +175,28 @@ case $NAME_SELF in UNIT_KB="true" fi CONF_TITLE="powermta ${WHAT} in queue" - CONF_LABEL="queue_${WHAT}" - CONF_SELF="${WHAT}" - + CONF_LABEL="queue_${WHAT}" + CONF_SELF="${WHAT}" + if [ -z $1 ]; then if [ !$UNIT_KB ]; then - GET=`fetch_xml //queue/smtp dom status` + GET=`fetch_xml //queue/smtp dom status` echo "${CONF_SELF}.value $GET" - exit 0 - else - GET=`fetch_xml //queue/smtp kb status` + exit 0 + else + GET=`fetch_xml //queue/smtp kb status` RETURN=`echo $GET / 1024 | bc` echo "${CONF_SELF}.value $RETURN" - exit 0 + exit 0 fi - fi + fi ;; *pmta_top_domains) CONF_TITLE="powermta top 10 domains by recipients" CONF_LABEL="top_domains" CONF_SELF="domains" AUTOLABEL="no" - + GET=`fetch_xml false false domains` if [ -z $1 ]; then for values in $GET; do @@ -204,7 +204,7 @@ case $NAME_SELF in echo $values else echo -n "$values.value#" - fi + fi done | tr ' ' '\n' | tr '#' ' ' # restore newline - replace hashtag with whitespace exit 0 elif [ $1 = config ]; then @@ -239,11 +239,11 @@ case $1 in fi fi exit 0 - ;; + ;; autoconf) # tell munin this script supports autoconfiguration: echo "yes" - ;; + ;; *) exit 1 ;; diff --git a/plugins/powermta/powermta_vmta_recpients b/plugins/powermta/powermta_vmta_recpients index f4fb697e..5d7d2752 100755 --- a/plugins/powermta/powermta_vmta_recpients +++ b/plugins/powermta/powermta_vmta_recpients @@ 
-39,4 +39,4 @@ domain=`echo "${queue[7]}" | awk -F" " '{print $1}' | cut -d/ -f1 | sed -e 's/[\ recpts=${queue[8]} conns=${queue[10]} echo $domain'.value '$recpts -done +done diff --git a/plugins/printer/hp2600_count_ b/plugins/printer/hp2600_count_ index 185db3de..3ebffdd8 100755 --- a/plugins/printer/hp2600_count_ +++ b/plugins/printer/hp2600_count_ @@ -15,11 +15,11 @@ get_data (){ do_stats () { count=1 - for I in `get_data`; do - [ $count = 3 ] && echo "countb.value $I" - [ $count = 6 ] && echo "county.value $I" - [ $count = 9 ] && echo "countc.value $I" - [ $count = 12 ] && echo "countm.value $I" + for I in `get_data`; do + [ $count = 3 ] && echo "countb.value $I" + [ $count = 6 ] && echo "county.value $I" + [ $count = 9 ] && echo "countc.value $I" + [ $count = 12 ] && echo "countm.value $I" count=$(($count + 1)) done @@ -33,7 +33,7 @@ graph_title HP 2600 pages by cartridge statistics graph_vlabel Count (Pages) graph_category printing graph_info Pages count by color. -graph_args -l 0 +graph_args -l 0 graph_scale no line.label --- line.line 0 diff --git a/plugins/printer/hp2600_status_ b/plugins/printer/hp2600_status_ index dcb36a24..a02e91c0 100755 --- a/plugins/printer/hp2600_status_ +++ b/plugins/printer/hp2600_status_ @@ -15,11 +15,11 @@ get_data (){ do_stats () { count=1 - for I in `get_data`; do - [ $count = 1 ] && echo "statb.value $I" - [ $count = 4 ] && echo "staty.value $I" - [ $count = 7 ] && echo "statc.value $I" - [ $count = 10 ] && echo "statm.value $I" + for I in `get_data`; do + [ $count = 1 ] && echo "statb.value $I" + [ $count = 4 ] && echo "staty.value $I" + [ $count = 7 ] && echo "statc.value $I" + [ $count = 10 ] && echo "statm.value $I" count=$(($count + 1)) done @@ -29,11 +29,11 @@ case $1 in config) cat <<'EOF' host_name printers -graph_title HP 2600 cartridge status +graph_title HP 2600 cartridge status graph_vlabel Status (%) graph_category printing graph_info Toner status. -graph_args --upper-limit 400 -l 0 +graph_args --upper-limit 400 -l 0 line.label --- line.line 400 statb.label Black diff --git a/plugins/printer/snmp__hpclj b/plugins/printer/snmp__hpclj old mode 100644 new mode 100755 index ffa6083d..5b5b9e79 --- a/plugins/printer/snmp__hpclj +++ b/plugins/printer/snmp__hpclj @@ -140,7 +140,7 @@ tray3.min 0 tray3.max 100 "; } - + print "multigraph hpclj_pagecount graph_category printing graph_title HP Printer Page Counters @@ -182,11 +182,11 @@ sub printPercentageValue { my $field = $_[0]; my $oid_cur = $_[1]; my $oid_max = $_[2]; - + if(not oidExists($oid_cur) || not oidExists($oid_max)){ return(0); } - + my $val_max = $session->get_single($oid_max) || 'U'; my $val_cur = $session->get_single($oid_cur); if ($val_max ne 'U') { @@ -199,11 +199,11 @@ sub printValue { } my $field = $_[0]; my $oid = $_[1]; - + if(not oidExists($oid)){ return(0); } - + my $val_cur = $session->get_single($oid) || 'U'; if ($val_cur ne 'U') { print $field, ".value ", $val_cur, "\n"; @@ -215,7 +215,7 @@ sub oidExists { } my $oid = $_[0]; my $val = $session->get_single($oid); - + if(!length $val || $val eq 'noSuchInstance' || $val eq 'U'){ return(0); }else{ diff --git a/plugins/printer/xerox-wc3220 b/plugins/printer/xerox-wc3220 index dc695333..983217e6 100755 --- a/plugins/printer/xerox-wc3220 +++ b/plugins/printer/xerox-wc3220 @@ -27,7 +27,7 @@ As is. 
#%# family=contrib #%# capabilities=autoconf - + =cut case $1 in @@ -51,7 +51,7 @@ esac wget -q -o /dev/null -O $TMP_DIR/$PRINTER_IP-Supplies.html http://$PRINTER_IP/status/Supplies.html -TONER_STR=$(grep "" "$TMP_DIR/$PRINTER_IP-Supplies.html") +TONER_STR=$(grep "" "$TMP_DIR/$PRINTER_IP-Supplies.html") #99% echo -n "black.value " @@ -59,4 +59,4 @@ echo $TONER_STR | egrep -o "5%>[0-9]{1,2}" | egrep -o "[0-9]{1,2}$" rm $TMP_DIR/$PRINTER_IP-Supplies.html - + diff --git a/plugins/printer/xerox-wc7232-consumables b/plugins/printer/xerox-wc7232-consumables index d427da3f..72c8ef30 100755 --- a/plugins/printer/xerox-wc7232-consumables +++ b/plugins/printer/xerox-wc7232-consumables @@ -27,7 +27,7 @@ As is. #%# family=contrib #%# capabilities=autoconf - + =cut case $1 in @@ -84,10 +84,10 @@ wget -q -o /dev/null -O $TMP_DIR/$PRINTER_IP-stsply.htm http://$PRINTER_IP/stspl #toner and drum cartriges have both status and percentage -TONER_STR=$(grep Toner "$TMP_DIR/$PRINTER_IP-stsply.htm") +TONER_STR=$(grep Toner "$TMP_DIR/$PRINTER_IP-stsply.htm") #info=info.concat([['Toner Cartridges',[['Cyan Toner [C]',0,77],['Magenta Toner [M]',7,1],['Yellow Toner [Y]',7,1],['Black Toner [K]',0,39]],3]]); -DRUM_STR=$(grep Drum "$TMP_DIR/$PRINTER_IP-stsply.htm") +DRUM_STR=$(grep Drum "$TMP_DIR/$PRINTER_IP-stsply.htm") #info=info.concat([['Drum Cartridges',[['Drum Cartridges',0,79]],1]]); echo -n "cyan.value " @@ -106,4 +106,4 @@ echo -n "drum.value " echo $DRUM_STR | egrep -o "s',[0-9],[0-9]{1,2}" | egrep -o "[0-9]{1,2}$" rm $TMP_DIR/$PRINTER_IP-stsply.htm - + diff --git a/plugins/prosody/prosody_ b/plugins/prosody/prosody_ old mode 100644 new mode 100755 index f121e45b..4a3fab53 --- a/plugins/prosody/prosody_ +++ b/plugins/prosody/prosody_ @@ -1,5 +1,4 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- +#!/usr/bin/env python3 # Copyright (c) 2010 Christoph Heer (Christoph.Heer@googlemail.com) # # Permission is hereby granted, free of charge, to any person obtaining a @@ -25,6 +24,7 @@ import os import telnetlib import re + def main(): try: mode = sys.argv[1] @@ -35,125 +35,119 @@ def main(): port = int(os.environ.get('port', 5582)) if mode == "suggest": - print "c2s" - print "s2s" - print "presence" - print "uptime" - print "users" + print("c2s") + print("s2s") + print("presence") + print("uptime") + print("users") sys.exit(0) if wildcard == "c2s": if mode == "config": - print "graph_title Prosody C2S Connections" - print "graph_vlabel users" - print "graph_category chat" + print("graph_title Prosody C2S Connections") + print("graph_vlabel users") + print("graph_category chat") - print "all_client_connections.label client connections" - print "secure_client_connections.label secure client connections" - print "insecure_client_connections.label insecure client " \ - "connections" + print("all_client_connections.label client connections") + print("secure_client_connections.label secure client connections") + print("insecure_client_connections.label insecure client connections") sys.exit(0) else: connection_count_re = re.compile(r"Total:\s(\d+)\s") telnet = telnetlib.Telnet(host, port) - telnet.write("c2s:show_secure()\n") - telnet_response = telnet.read_until("secure client connections", - 5) - parsed_info = connection_count_re.findall(telnet_response) + telnet.write(b"c2s:show_secure()\n") + telnet_response = telnet.read_until(b"secure client connections", 5) + parsed_info = connection_count_re.findall(telnet_response.decode('ascii')) secure_client_connections = int(parsed_info[0]) - print 
"secure_client_connections.value %s" % \ - (secure_client_connections) + print("secure_client_connections.value %s" % secure_client_connections) - telnet.write("c2s:show_insecure()\n") - telnet_response = telnet.read_until("insecure client connections", - 5) - parsed_info = connection_count_re.findall(telnet_response) + telnet.write(b"c2s:show_insecure()\n") + telnet_response = telnet.read_until(b"insecure client connections", 5) + parsed_info = connection_count_re.findall(telnet_response.decode('ascii')) insecure_client_connections = int(parsed_info[0]) - print "insecure_client_connections.value %s" % \ - (insecure_client_connections) - all_client_connections = secure_client_connections + \ - insecure_client_connections - print "all_client_connections.value %s" % (all_client_connections) - telnet.write("quit\n") + print("insecure_client_connections.value %s" % insecure_client_connections) + all_client_connections = secure_client_connections + insecure_client_connections + print("all_client_connections.value %s" % (all_client_connections)) + telnet.write(b"quit\n") elif wildcard == "s2s": if mode == "config": - print "graph_title Prosody S2S Connections" - print "graph_vlabel servers" - print "graph_category chat" + print("graph_title Prosody S2S Connections") + print("graph_vlabel servers") + print("graph_category chat") - print "outgoing_connections.label outgoing connections" - print "incoming_connections.label incoming connections" + print("outgoing_connections.label outgoing connections") + print("incoming_connections.label incoming connections") sys.exit(0) else: server_connections_re = re.compile(r"(\d+) outgoing, (\d+)") telnet = telnetlib.Telnet(host, port) - telnet.write("s2s:show()\n") - telnet_response = telnet.read_until("connections", 5) - parsed_info = server_connections_re.findall(telnet_response) - print "outgoing_connections.value %s" % (parsed_info[0][0]) - print "incoming_connections.value %s" % (parsed_info[0][1]) - telnet.write("quit\n") + telnet.write(b"s2s:show()\n") + telnet_response = telnet.read_until(b"connections", 5) + parsed_info = server_connections_re.findall(telnet_response.decode('ascii')) + print("outgoing_connections.value %s" % (parsed_info[0][0])) + print("incoming_connections.value %s" % (parsed_info[0][1])) + telnet.write(b"quit\n") elif wildcard == "presence": if mode == "config": - print "graph_title Prosody Client Presence" - print "graph_vlabel clients" - print "graph_category chat" + print("graph_title Prosody Client Presence") + print("graph_vlabel clients") + print("graph_category chat") - print "available.label Avaible Clients" - print "chat.label Ready for Chat Clients" - print "away.label Away Clients" - print "xa.label Extended Away Clients" - print "dnd.label Do Not Disturb Clients" + print("available.label Available Clients") + print("chat.label Ready for Chat Clients") + print("away.label Away Clients") + print("xa.label Extended Away Clients") + print("dnd.label Do Not Disturb Clients") sys.exit(0) else: - client_presence_re = re.compile(r"[-\]] (.*?)\(\d+\)") + client_presence_re = re.compile(r"[\]] (.*?)\(\d+\)") telnet = telnetlib.Telnet(host, port) - telnet.write("c2s:show()\n") - telnet_response = telnet.read_until("clients", 5) - parsed_info = client_presence_re.findall(telnet_response) - print "available.value %s" % (parsed_info.count("available")) - print "chat.value %s" % (parsed_info.count("chat")) - print "away.value %s" % (parsed_info.count("away")) - print "xa.value %s" % (parsed_info.count("xa")) - print "dnd.value %s" % 
(parsed_info.count("dnd")) - telnet.write("quit\n") + telnet.write(b"c2s:show()\n") + telnet_response = telnet.read_until(b"clients", 5) + parsed_info = client_presence_re.findall(telnet_response.decode('ascii')) + print("available.value %s" % parsed_info.count("available")) + print("chat.value %s" % (parsed_info.count("chat"))) + print("away.value %s" % (parsed_info.count("away"))) + print("xa.value %s" % (parsed_info.count("xa"))) + print("dnd.value %s" % (parsed_info.count("dnd"))) + telnet.write(b"quit\n") elif wildcard == "uptime": if mode == "config": - print "graph_title Prosody Uptime" - print "graph_args --base 1000 -l 0" - print "graph_scale no" - print "graph_vlabel uptime in days" - print "graph_category chat" - print "graph_order uptime" - print "uptime.draw AREA" - print "uptime.min U" - print "uptime.max U" - print "uptime.label uptime" - print "uptime.type GAUGE" + print("graph_title Prosody Uptime") + print("graph_args --base 1000 -l 0") + print("graph_scale no") + print("graph_vlabel uptime in days") + print("graph_category chat") + print("graph_order uptime") + print("uptime.draw AREA") + print("uptime.min U") + print("uptime.max U") + print("uptime.label uptime") + print("uptime.type GAUGE") sys.exit(0) else: uptime_re = re.compile(r"\d+") telnet = telnetlib.Telnet(host, port) - telnet.write("server:uptime()\n") - telnet_response = telnet.read_until("minutes (", 5) - parsed_info = uptime_re.findall(telnet_response) - uptime_value = float(parsed_info[0]) + float(parsed_info[1])/24 +\ - float(parsed_info[2])/60/24 - print "uptime.value %s" % (uptime_value) - telnet.write("quit\n") + telnet.write(b"server:uptime()\n") + telnet_response = telnet.read_until(b"minutes (", 5) + parsed_info = uptime_re.findall(telnet_response.decode('ascii')) + uptime_value = (float(parsed_info[0]) + float(parsed_info[1]) / 24 + + float(parsed_info[2]) / 60 / 24) + print("uptime.value %s" % (uptime_value)) + telnet.write(b"quit\n") elif wildcard == "users": if mode == "config": - print "graph_title Prosody Registered Users" - print "graph_vlabel users" - print "graph_category chat" + print("graph_title Prosody Registered Users") + print("graph_vlabel users") + print("graph_category chat") base_dir = os.environ.get('internal_storage_path', "/var/lib/prosody") if os.path.isdir(base_dir): @@ -161,35 +155,40 @@ def main(): for vhost in vhosts: account_dir = os.path.join(base_dir, vhost, "accounts") if os.path.isdir(account_dir): - vhost = vhost.replace("%2e",".") - munin_var = vhost.replace(".","_") + vhost = vhost.replace("%2e", ".") + munin_var = vhost.replace(".", "_") if mode == "config": - print "%s.label %s" % (munin_var, vhost) + print("%s.label %s" % (munin_var, vhost)) else: accounts = len(list(listfiles(account_dir))) - print "%s.value %s" % (munin_var, accounts) + print("%s.value %s" % (munin_var, accounts)) + def listdirs(folder): for x in os.listdir(folder): if os.path.isdir(os.path.join(folder, x)): yield x + def listfiles(folder): for x in os.listdir(folder): if os.path.isfile(os.path.join(folder, x)): yield x + if __name__ == '__main__': main() -### Here starts the prosody_ plugin documentation, intended to be used with munindoc and in plugin gallery +# Here starts the prosody_ plugin documentation, intended to be used with munindoc and in +# plugin gallery. """ =head1 NAME prosody_ - Munin wildcard-plugin to monitor a L xmpp server. -This wildcard plugin provides at the moment only the suffixes C, C, C, C and C suffixes. 
+This wildcard plugin provides at the moment only the suffixes C, C, C, +C and C suffixes. =head1 INSTALLATION @@ -219,7 +218,7 @@ After the installation you need to restart your munin-node: =head1 CONFIGURATION -When you want to change the default host (localhost) and port (5582) do it in a file named prosody +When you want to change the default host (localhost) and port (5582) do it in a file named prosody placed in the directory /etc/munin/plugin-conf.d/ with a config like this: =over 2 @@ -230,7 +229,8 @@ placed in the directory /etc/munin/plugin-conf.d/ with a config like this: =back -If you want to get the number of registered users, add the following lines to /etc/munin/plugin-conf.d/prosody: +If you want to get the number of registered users, add the following lines to +/etc/munin/plugin-conf.d/prosody: =over 2 diff --git a/plugins/puma/puma_ b/plugins/puma/puma_ old mode 100644 new mode 100755 diff --git a/plugins/punbb/punbb_users b/plugins/punbb/punbb_users index e826400a..954233d9 100755 --- a/plugins/punbb/punbb_users +++ b/plugins/punbb/punbb_users @@ -42,11 +42,12 @@ my $type = undef; if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { - if ($ret) - { + if ($ret) { print "no ($ret)\n"; - exit 1; + } else { + print "yes\n"; } + exit 0; } @@ -55,7 +56,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "config" ) print "graph_title Users\n"; print "graph_args --base 1000\n"; print "graph_vlabel current users\n"; - print "graph_category Forum\n"; + print "graph_category forum\n"; print "graph_total Total\n"; print "members.label Members\n"; diff --git a/plugins/puppet/example-graphs/puppetdb-day.png b/plugins/puppet/example-graphs/puppetdb-day.png new file mode 100644 index 00000000..c731238f Binary files /dev/null and b/plugins/puppet/example-graphs/puppetdb-day.png differ diff --git a/plugins/puppet/puppet_runtime b/plugins/puppet/puppet_runtime index 8962f4df..30720582 100755 --- a/plugins/puppet/puppet_runtime +++ b/plugins/puppet/puppet_runtime @@ -1,4 +1,4 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby # This plugin reports the duration of the most recent puppet agent run. # It requires read access to the puppet logfile (defaults to /var/log/messages). @@ -17,29 +17,28 @@ def get_runtime logfile = ENV['puppet_logfile'] || '/var/log/messages' t = Time.now - dateformat = ENV['puppet_logformat'] || "^%b %d" + dateformat = ENV['puppet_logformat'] || '^%b %d' today = t.strftime(dateformat) File.open(logfile).grep(/#{today}/).grep(/Finished catalog run in/).reverse_each do |line| if line =~ /in (.*) seconds/ - puts "runtime.value #{$1}" + puts "runtime.value #{Regexp.last_match(1)}" exit 0 end end end case ARGV[0] - when 'config' - puts "graph_category devel" - puts "graph_args --base 1000 -l 0" - puts "graph_scale no" - puts "graph_title puppet catalog run time" - puts "graph_vlabel Seconds" - puts "runtime.label Catalog application time" - exit 0 - when 'autoconf' - puts "yes" - exit 0 - else - get_runtime +when 'config' + puts 'graph_category other' + puts 'graph_args --base 1000 -l 0' + puts 'graph_scale no' + puts 'graph_title puppet catalog run time' + puts 'graph_vlabel Seconds' + puts 'runtime.label Catalog application time' + exit 0 +when 'autoconf' + puts 'yes' + exit 0 +else + get_runtime end - diff --git a/plugins/puppet/puppetdb b/plugins/puppet/puppetdb new file mode 100755 index 00000000..a4d55aa4 --- /dev/null +++ b/plugins/puppet/puppetdb @@ -0,0 +1,202 @@ +#!/usr/bin/python3 +""" +=head1 NAME + +puppetdb - Create a graph out of PuppetDB's JVM memory usage. 
+ +=head1 CONFIGURATION + +This plugin does not need to run with a privileged user. + +By default, the plugin will send requests to a PuppetDB instance on localhost. + +Plugin configuration parameters: + + * pdburl: + Set the URL to your PuppetDB instance. This url should point to the mbeans + endpoint. By default this has a value of + http://localhost:8080/metrics/v1/mbeans + + * timeout: + Time in seconds (int) to wait for a result when querying the REST API. By + default, wait for 2 seconds + + * ca: + Path to the Certificate Authority used for verifying a cert received from + the PuppetDB server during an https connection. This can be useful if the + cert used by PuppetDB was signed by the puppetmaster's CA. This option is + not necessary if a plaintext connection is used (e.g. if pdburl starts + with 'http://'). + + * cert: + Path to the TLS certificate file used for establishing client + communication for an https connection. This option should be paired with + the `key` option. This option is not necessary if a plaintext connection + is used (e.g. if pdburl starts with 'http://'). + + * key: + Path to the TLS private key used for establishing client communication for + an https connection. This option should be paired with the `cert` option. + This option is not necessary if a plaintext connection is used (e.g. if + pdburl starts with 'http://'). + +Example: + + [puppetdb] + env.pdburl https://puppetdb.example.com:8080/metrics/v1/mbeans + env.timeout 5 + env.ca /etc/puppetboard/ca.pem + env.cert /etc/puppetboard/client_cert.pem + env.key /etc/puppetboard/client_key.pem + +=head1 DEPENDENCIES + +python3-requests + +=head1 COMPATIBILITY + + * PuppetDB 6.x: https://puppet.com/docs/puppetdb/6.0/api/metrics/v1/mbeans.html#jvm-metrics + +=head1 AUTHOR + +Copyright (c) 2020, Gabriel Filion, gabster@lelutin.ca + +=head1 LICENSE + +This code is licensed under GPLv3+ + +""" + +import os +import sys +import requests + + +class WrongStatusCode(Exception): + pass + + +def rest_request(url, timeout, ca, key_pair): + """Make a GET request to URL. We expect a 200 response. + This function will let exceptions from requests raise through to indicate + request failure. + If response code is not 200, it will raise a WrongStatusCode exception. + """ + headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'} + + ssl_options = {} + if ca: + ssl_options['verify'] = ca + if key_pair: + ssl_options['cert'] = key_pair + + resp = requests.get(url, headers=headers, timeout=timeout, **ssl_options) + if resp.status_code != 200: + err = f"GET Request to '{url}' returned code {resp.status_code}; expected 200." 
# noqa: E501 + raise WrongStatusCode(err) + return resp + + +def config(): + """Print all graph configuration for munin.""" + print("graph_title PuppetDB JVM Memory usage") + print("graph_args --base 1024") + print("graph_vlabel Bytes") + print("graph_info This graph shows how much memory from the JVM " + + "Heapspace is being used by PuppetDB") + print("graph_category other") + print("graph_order jvm_mem_max jvm_mem_committed jvm_mem_used") + + # Fields + print("jvm_mem_max.min 0") + print("jvm_mem_max.label JVM Max mem") + print("jvm_mem_max.info Maximum memory allocated to the JVM") + + print("jvm_mem_committed.label JVM Committed mem") + print("jvm_mem_committed.min 0") + print("jvm_mem_committed.info Memory currently committed by the JVM") + + print("jvm_mem_used.label JVM Used mem") + print("jvm_mem_used.min 0") + print("jvm_mem_used.info Memory currently used by objects in the JVM") + print("jvm_mem_used.draw AREA") + + +def fetch_field_values(mbeans_url, timeout, ca, key_pair): + """Get values from PuppetDB and print them out.""" + memory_url = f"{mbeans_url}/java.lang:type=Memory" + + try: + mem_req = rest_request(memory_url, timeout, ca, key_pair) + except Exception as e: + print(f"HTTP Request did not complete successfully: {e}", + file=sys.stderr) + exit(1) + + try: + memory = mem_req.json() + except Exception as e: + print(f"Could not parse JSON, can't find the info we need: {e}", + file=sys.stderr) + exit(1) + + try: + heap = memory['HeapMemoryUsage'] + mem_max = heap['max'] + mem_committed = heap['committed'] + mem_used = heap['used'] + except Exception as e: + print(f"Memory values were not found in the reply JSON: {e}", + file=sys.stderr) + exit(1) + + print(f"jvm_mem_max.value {mem_max}") + print(f"jvm_mem_committed.value {mem_committed}") + print(f"jvm_mem_used.value {mem_used}") + + +if __name__ == '__main__': + mbeans_url = os.environ.get('pdburl', 'http://localhost:8080/metrics/v1/mbeans') + try: + timeout = int(os.environ.get('timeout', '2')) + except ValueError as e: + print(f"Invalid value for timeout: {e}", file=sys.stderr) + exit(1) + + ca = os.environ.get('ca', None) + if ca: + if not os.path.exists(ca): + print(f"CA file '{ca}' not found.", file=sys.stderr) + exit(1) + + cert = os.environ.get('cert', None) + key = os.environ.get('key', None) + if cert or key: + if cert and key: + if not os.path.exists(cert): + print(f"Certificate file '{cert}' not found.", file=sys.stderr) + exit(1) + if not os.path.exists(key): + print(f"Key file '{key}' not found.", file=sys.stderr) + exit(1) + else: + print("Only one of 'cert' and 'key' supplied. 
" + "Both are needed for client authentication.", + file=sys.stderr) + exit(1) + + if len(sys.argv) > 1 and sys.argv[1] == 'autoconf': + try: + dummy = rest_request(mbeans_url, timeout, ca, (cert, key)) + except Exception as e: + print(f"no ({e})") + exit(0) + + print("yes") + exit(0) + + if len(sys.argv) > 1 and sys.argv[1] == 'config': + config() + exit(0) + + fetch_field_values(mbeans_url, timeout, ca, (cert, key)) diff --git a/plugins/qmail/qmailsend b/plugins/qmail/qmailsend index 12a44ec2..01281639 100755 --- a/plugins/qmail/qmailsend +++ b/plugins/qmail/qmailsend @@ -2,7 +2,7 @@ # # Plugin to show amount of individual outgoing smtp-replies per hour # -# Contributed by Hkon Nessjen +# Contributed by Håkon Nessjøen # # Magic markers - optional - used by installation scripts and # munin-config: @@ -18,11 +18,10 @@ if (exists $ARGV[0]) { if ($ARGV[0] eq "autoconf") { if (-f "${logpath}current") { print "yes\n"; - exit 0; } else { print STDERR "no (Cannot find ${logpath}current. Please specify env.logpath)\n"; - exit 1; } + exit 0; } } @@ -68,7 +67,7 @@ if (exists $ARGV[0]) { print "graph_title Qmail outgoing SMTP replies\n"; print "graph_args --base 1000 -l 0 \n"; print "graph_vlabel replies/hour\n"; - print "graph_category Mail\n"; + print "graph_category mail\n"; print "graph_total Total\n" if (keys (%descriptions) > 1); print "graph_info This graph shows qmail-send transaction response codes.\n"; print "graph_order res" . join(" res", sort by_code keys %descriptions) . "\n"; diff --git a/plugins/qmail/qmailsend_plesk b/plugins/qmail/qmailsend_plesk index 22f2f7f7..34e94259 100755 --- a/plugins/qmail/qmailsend_plesk +++ b/plugins/qmail/qmailsend_plesk @@ -2,7 +2,7 @@ # # Plugin to show amount of individual outgoing smtp-replies per hour # -# Contributed by Hkon Nessjen +# Contributed by Håkon Nessjøen # # Magic markers - optional - used by installation scripts and # munin-config: @@ -18,11 +18,10 @@ if (exists $ARGV[0]) { if ($ARGV[0] eq "autoconf") { if (-f "${logpath}maillog") { print "yes\n"; - exit 0; } else { print STDERR "no (Cannot find ${logpath}maillog. Please specify env.logpath)\n"; - exit 1; } + exit 0; } } @@ -68,7 +67,7 @@ if (exists $ARGV[0]) { print "graph_title Qmail outgoing SMTP replies\n"; print "graph_args --base 1000 -l 0 \n"; print "graph_vlabel replies/hour\n"; - print "graph_category Mail\n"; + print "graph_category mail\n"; print "graph_total Total\n" if (keys (%descriptions) > 1); print "graph_info This graph shows qmail-send transaction response codes.\n"; print "graph_order res" . join(" res", sort by_code keys %descriptions) . 
"\n"; diff --git a/plugins/qmail/qremote b/plugins/qmail/qremote index 52746152..ddf8ec21 100755 --- a/plugins/qmail/qremote +++ b/plugins/qmail/qremote @@ -17,9 +17,9 @@ if [ "$1" = "config" ]; then cat < 0; my $where; @@ -82,7 +82,7 @@ sub do_stats { } } close $fh; - + save_state($stop_at); foreach my $rc (sort {$a<=>$b} keys %REJECT_CODES) { @@ -108,7 +108,7 @@ graph_total Total my $type; foreach $k (sort {$a<=>$b} keys %REJECT_CODES) { - print + print "r$k.label $REJECT_CODES{$k} r$k.type ABSOLUTE r$k.min 0 diff --git a/plugins/quake/qstat b/plugins/quake/qstat index 7438b2a8..32c645c6 100755 --- a/plugins/quake/qstat +++ b/plugins/quake/qstat @@ -13,7 +13,7 @@ qstat_exe='/usr/local/bin/qstat' #---------------------------------------------------------------# # End of config -script_name=$(basename $0) +script_name=$(basename "$0") ################################################################# ################################################################# @@ -29,9 +29,9 @@ usage() { config() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 @@ -51,20 +51,20 @@ player.label players" #---------------------------------------------------------------# quake_stat() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 port=$3 fi - if [ ! -z ${gametype} ] && [ ! -z ${gametype} ] && [ ! -z ${gametype} ]; then - dummy=$(${qstat_exe} -raw ";" -nh -${gametype} ${ip}:${port}) + if [ -n "$gametype" ] && [ -n "$gametype" ] && [ -n "$gametype" ]; then + dummy=$("$qstat_exe" -raw ";" -nh "-$gametype" "${ip}:${port}") - playervalue=$(echo ${dummy} | cut -d\; -f6) - maxplayervalue=$(echo ${dummy} | cut -d\; -f5) + playervalue=$(echo "$dummy" | cut -d\; -f6) + maxplayervalue=$(echo "$dummy" | cut -d\; -f5) if [ -z "${playervalue}" ]; then playervalue=0 @@ -88,7 +88,7 @@ quake_stat() { #---------------------------------------------------------------# case $1 in config) - config + config "$@" exit 0 ;; help | ?) 
@@ -99,7 +99,7 @@ case $1 in echo "no (edit the script for set qstat path)" ;; *) - quake_stat $1 $2 $3 + quake_stat "$@" exit 0 ;; esac diff --git a/plugins/quake/qstatcod4and5_ b/plugins/quake/qstatcod4and5_ new file mode 100755 index 00000000..e02dbd55 --- /dev/null +++ b/plugins/quake/qstatcod4and5_ @@ -0,0 +1,100 @@ +#!/bin/sh +################################################################# +# Title : Qstat plugin for Munin # +# Author : Benjamin DUPUIS - Poil # +# Email : poil@quake.fr # +# First release : 18/10/2007 # +#---------------------------------------------------------------# +# Edited: Rouven David Naßl - peperoni # +# Edit : 09/01/2009 # +# Plugin edited for COD4+COD5 # +# Email: peperoni@sac-esports.de # +#---------------------------------------------------------------# +################################################################# +# Variable : # +#---------------------------------------------------------------# +# Set path to QSTAT # +qstat_exe='/usr/local/bin/qstat' # +#---------------------------------------------------------------# +# End of config +script_name=$(basename "$0") +################################################################# + +################################################################# +# Help # +#---------------------------------------------------------------# +usage() { + echo 'For testing the script, run qstatcod4and5_ cods IP PORT' + echo ' - GameType : cods ... run qstat for seeing available gametype' + echo 'For munin you must ln -s /usr/share/munin/plugins/qstatcod4and5_ /etc/munin/plugins/cod4_cods_IP_PORT' + echo 'Example you will test this COD4 Server: 123.456.789.123:28960' + echo 'your symlink looks like this: ln -s /usr/share/munin/plugins/cod4server /etc/munin/plugins/cod4_cods_123.456.789.123_28960' + echo 'Perhaps you must have to set qstat_exe path, actually on'${qstat_exe}; + echo 'Have Fun' +} + +config() { + if [ "${script_name}" != "qstatcod4and5_" ]; then + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) + else + gametype=$1 + ip=$2 + port=$3 + fi + +echo "graph_title Number of players on ${gametype} - ${ip}:${port} +graph_vlabel players +graph_category games +player.label players" +} + +################################################################# +# Quake Stat, call qstat # +#---------------------------------------------------------------# +quake_stat() { + if [ "${script_name}" != "qstatcod4and5_" ]; then + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) + else + gametype=$1 + ip=$2 + port=$3 + fi + + if [ -n "$gametype" ] && [ -n "$gametype" ] && [ -n "$gametype" ]; then + dummy=$("$qstat_exe" -P -pa -sort P "-$gametype" "${ip}:${port}" | grep frags | wc -l) + playervalue=$dummy + + if [ -z "$playervalue" ]; then + playervalue=0 + fi + + echo "player.value $playervalue" + else + echo "player.value U" + fi +} + +################################################################# +# Main # +#---------------------------------------------------------------# +case $1 in + config) + config "$1" "$2" "$3" + exit 0 + ;; + help | ?) 
+ usage + exit 0 + ;; + autoconf) + echo "no (edit the script for set qstat path)" + ;; + *) + quake_stat "$1" "$2" "$3" + exit 0 + ;; +esac diff --git a/plugins/quake/qstatet_ b/plugins/quake/qstatet_ index 105d4b67..8e68ad34 100755 --- a/plugins/quake/qstatet_ +++ b/plugins/quake/qstatet_ @@ -13,7 +13,7 @@ qstat_exe='/usr/bin/qstat' #---------------------------------------------------------------# # End of config -script_name=$(basename $0) +script_name=$(basename "$0") ################################################################# ################################################################# @@ -29,9 +29,9 @@ usage() { config() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 @@ -51,20 +51,19 @@ player.label players" #---------------------------------------------------------------# quake_stat() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 port=$3 fi - if [ ! -z ${gametype} ] && [ ! -z ${gametype} ] && [ ! -z ${gametype} ]; then - dummy=$(${qstat_exe} -P -pa -sort P -${gametype} ${ip}:${port} | grep frags | grep -wv 0ms | wc -l) - dummy2=$(${qstat_exe} -P -pa -sort P -${gametype} ${ip}:${port} | grep frags | grep -w 0ms | wc -l) - playervalue=$dummy - maxplayervalue=$dummy2 + if [ -n "$gametype" ] && [ -n "$gametype" ] && [ -n "$gametype" ]; then + stat_output=$("$qstat_exe" -P -pa -sort P "-$gametype" "${ip}:${port}" | grep frags) + playervalue=$(echo "$stat_output" | grep -cwv 0ms) + maxplayervalue=$(echo "$stat_output" | grep -cw 0ms) if [ -z "${playervalue}" ]; then playervalue=0 @@ -88,7 +87,7 @@ quake_stat() { #---------------------------------------------------------------# case $1 in config) - config + config "$@" exit 0 ;; help | ?) 
@@ -99,7 +98,7 @@ case $1 in echo "no (edit the script for set qstat path)" ;; *) - quake_stat $1 $2 $3 + quake_stat "$@" exit 0 ;; esac diff --git a/plugins/quake/qstatqw_ b/plugins/quake/qstatqw_ index 1538b87b..908b95f4 100755 --- a/plugins/quake/qstatqw_ +++ b/plugins/quake/qstatqw_ @@ -13,7 +13,7 @@ qstat_exe='/usr/bin/qstat' #---------------------------------------------------------------# # End of config -script_name=$(basename $0) +script_name=$(basename "$0") ################################################################# ################################################################# @@ -29,9 +29,9 @@ usage() { config() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 @@ -51,20 +51,19 @@ player.label players" #---------------------------------------------------------------# quake_stat() { if [ "${script_name}" != "qstat_" ]; then - gametype=$(echo ${script_name} | cut -d_ -f2) - ip=$(echo ${script_name} | cut -d_ -f3) - port=$(echo ${script_name} | cut -d_ -f4) + gametype=$(echo "$script_name" | cut -d_ -f2) + ip=$(echo "$script_name" | cut -d_ -f3) + port=$(echo "$script_name" | cut -d_ -f4) else gametype=$1 ip=$2 port=$3 fi - if [ ! -z ${gametype} ] && [ ! -z ${gametype} ] && [ ! -z ${gametype} ]; then - dummy=$(${qstat_exe} -P -pa -sort P -${gametype} ${ip}:${port} | grep team | grep -wv 0ms | wc -l) - dummy2=$(${qstat_exe} -P -pa -sort P -${gametype} ${ip}:${port} | grep team | grep -w 0ms | wc -l) - playervalue=$dummy - maxplayervalue=$dummy2 + if [ -n "$gametype" ] && [ -n "$gametype" ] && [ -n "$gametype" ]; then + stat_output=$("$qstat_exe" -P -pa -sort P "-${gametype}" "${ip}:${port}" | grep team) + playervalue=$(echo "$stat_output" | grep -cwv 0ms) + maxplayervalue=$(echo "$stat_output" | grep -cw 0ms) if [ -z "${playervalue}" ]; then playervalue=0 @@ -74,7 +73,6 @@ quake_stat() { maxplayervalue=0 fi - echo "maxplayer.value "${maxplayervalue}; echo "player.value "${playervalue}; else @@ -88,7 +86,7 @@ quake_stat() { #---------------------------------------------------------------# case $1 in config) - config + config "$@" exit 0 ;; help | ?) @@ -99,8 +97,7 @@ case $1 in echo "no (edit the script for set qstat path)" ;; *) - quake_stat $1 $2 $3 + quake_stat "$@" exit 0 ;; esac - diff --git a/plugins/rabbitmq/rabbitmq_connections b/plugins/rabbitmq/rabbitmq_connections index e0fc2a89..73f770ec 100755 --- a/plugins/rabbitmq/rabbitmq_connections +++ b/plugins/rabbitmq/rabbitmq_connections @@ -53,7 +53,7 @@ case $(whoami) in esac # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. if [ "$1" = "config" ]; then CONN_WARN=${conn_warn:-500} @@ -89,11 +89,11 @@ fi # real work - i.e. display the data. Almost always this will be # "value" subfield for every data field. 
-if hash rabbitmqctl >/dev/null 2>&1; then +if command -v rabbitmqctl >/dev/null 2>&1; then connections=$(HOME=/tmp rabbitmqctl list_connections state | grep -c running) else echo "$0: Could not run rabbitmqctl" >&2 connections=U fi -printf "connections.value %s\n" "$connections" +printf 'connections.value %s\n' "$connections" diff --git a/plugins/rabbitmq/rabbitmq_consumers b/plugins/rabbitmq/rabbitmq_consumers index 76ce6759..b3ac555b 100755 --- a/plugins/rabbitmq/rabbitmq_consumers +++ b/plugins/rabbitmq/rabbitmq_consumers @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the queues of a virtual_host in RabbitMQ # # Usage: Link or copy into /etc/munin/node.d/ @@ -26,14 +26,14 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. HOME=/tmp/ VHOST=${vhost:-"/"} QUEUES=$(HOME=$HOME rabbitmqctl list_queues -p $VHOST name | \ grep -v '^Listing' | \ grep -v 'done\.$' | sed -e 's/[.=-]/_/g' ) - + if [ "$1" = "config" ]; then QUEUE_WARN=${queue_warn:-100} QUEUE_CRIT=${queue_crit:-500} diff --git a/plugins/rabbitmq/rabbitmq_messages b/plugins/rabbitmq/rabbitmq_messages index a1b8f688..ca7cece9 100755 --- a/plugins/rabbitmq/rabbitmq_messages +++ b/plugins/rabbitmq/rabbitmq_messages @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the queues of a virtual_host in RabbitMQ # # Usage: Link or copy into /etc/munin/node.d/ @@ -26,14 +26,14 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. HOME=/tmp/ VHOST=${vhost:-"/"} QUEUES=$(HOME=$HOME rabbitmqctl list_queues -p $VHOST name | \ grep -v '^Listing' | \ grep -v 'done\.$' | sed -e 's/[.=-]/_/g' ) - + if [ "$1" = "config" ]; then QUEUE_WARN=${queue_warn:-10000} QUEUE_CRIT=${queue_crit:-20000} diff --git a/plugins/rabbitmq/rabbitmq_messages_unacknowledged b/plugins/rabbitmq/rabbitmq_messages_unacknowledged index d5d8f60f..a36a0a4d 100755 --- a/plugins/rabbitmq/rabbitmq_messages_unacknowledged +++ b/plugins/rabbitmq/rabbitmq_messages_unacknowledged @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the queues of a virtual_host in RabbitMQ # # Usage: Link or copy into /etc/munin/node.d/ @@ -26,14 +26,14 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. HOME=/tmp/ VHOST=${vhost:-"/"} QUEUES=$(HOME=$HOME rabbitmqctl list_queues -p $VHOST name | \ grep -v '^Listing' | \ grep -v 'done\.$' | sed -e 's/[.=-]/_/g' ) - + if [ "$1" = "config" ]; then QUEUE_WARN=${queue_warn:-10000} QUEUE_CRIT=${queue_crit:-20000} diff --git a/plugins/rabbitmq/rabbitmq_messages_uncommitted b/plugins/rabbitmq/rabbitmq_messages_uncommitted index 179d2e60..c3e6fadf 100755 --- a/plugins/rabbitmq/rabbitmq_messages_uncommitted +++ b/plugins/rabbitmq/rabbitmq_messages_uncommitted @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the queues of a virtual_host in RabbitMQ # # Usage: Link or copy into /etc/munin/node.d/ @@ -26,14 +26,14 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. 
HOME=/tmp/ VHOST=${vhost:-"/"} QUEUES=$(HOME=$HOME rabbitmqctl list_queues -p $VHOST name | \ grep -v '^Listing' | \ grep -v 'done\.$' | sed -e 's/[.=-]/_/g' ) - + if [ "$1" = "config" ]; then QUEUE_WARN=${queue_warn:-10000} QUEUE_CRIT=${queue_crit:-20000} diff --git a/plugins/rabbitmq/rabbitmq_queue_memory b/plugins/rabbitmq/rabbitmq_queue_memory index fef2cc7b..253a15a6 100755 --- a/plugins/rabbitmq/rabbitmq_queue_memory +++ b/plugins/rabbitmq/rabbitmq_queue_memory @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the queues of a virtual_host in RabbitMQ # # Usage: Link or copy into /etc/munin/node.d/ @@ -26,14 +26,14 @@ if [ "$1" = "autoconf" ]; then fi # If run with the "config"-parameter, give out information on how the -# graphs should look. +# graphs should look. HOME=/tmp/ VHOST=${vhost:-"/"} QUEUES=$(HOME=$HOME rabbitmqctl list_queues -p $VHOST name | \ grep -v '^Listing' | \ grep -v 'done\.$' | sed -e 's/[.=-]/_/g' ) - + if [ "$1" = "config" ]; then QUEUE_WARN=${queue_warn:-104857600} # 100 MB QUEUE_CRIT=${queue_crit:-209715200} # 200 MB diff --git a/plugins/rackspace/README b/plugins/rackspace/README index 9faf8c8d..25ba1fd1 100644 --- a/plugins/rackspace/README +++ b/plugins/rackspace/README @@ -1,5 +1,5 @@ ====================================================================================== -These plugins are made to monitor RackSpace Cloudfiles storage usage and files +These plugins are made to monitor RackSpace Cloudfiles storage usage and files count. ====================================================================================== diff --git a/plugins/rackspace/rackspace_cdn_count.php b/plugins/rackspace/rackspace_cdn_count.php old mode 100644 new mode 100755 index d1ee8cbb..ca2e66b5 --- a/plugins/rackspace/rackspace_cdn_count.php +++ b/plugins/rackspace/rackspace_cdn_count.php @@ -5,7 +5,7 @@ # Parameters: # # config (required) -# +# # #%# family=manual @@ -17,8 +17,8 @@ $api_url='https://auth.api.rackspacecloud.com/v1.0/'; function SplitTwice($content,$first,$second) { $s1=split($first,$content); - $splitted=split($second,$s1[1]); - return trim($splitted[0]); + $tokens=split($second,$s1[1]); + return trim($tokens[0]); } diff --git a/plugins/rackspace/rackspace_cdn_size.php b/plugins/rackspace/rackspace_cdn_size.php old mode 100644 new mode 100755 index b3b836fd..5f50257b --- a/plugins/rackspace/rackspace_cdn_size.php +++ b/plugins/rackspace/rackspace_cdn_size.php @@ -16,8 +16,8 @@ $api_url='https://auth.api.rackspacecloud.com/v1.0/'; function SplitTwice($content,$first,$second) { $s1=split($first,$content); - $splitted=split($second,$s1[1]); - return trim($splitted[0]); + $tokens=split($second,$s1[1]); + return trim($tokens[0]); } diff --git a/plugins/radiator/radiator_acct_lag b/plugins/radiator/radiator_acct_lag index be9ab4e8..217339cd 100755 --- a/plugins/radiator/radiator_acct_lag +++ b/plugins/radiator/radiator_acct_lag @@ -23,11 +23,10 @@ if [ "$statisticsdir" ]; then STATISTICSDIR=$statisticsdir ; fi if [ "$1" = "autoconf" ]; then if [ -d ${STATISTICSDIR} ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi @@ -36,7 +35,7 @@ if [ "$1" = "config" ]; then echo 'graph_vlabel time' echo 'graph_args -l 0' echo 'graph_category auth' - echo 'graph_info This graph displayes ACCT response lag of all radiator processes combined.' + echo 'graph_info This graph displays ACCT response lag of all radiator processes combined.' 
# # Count all statistics files and create labels for COUNT in `ls $STATISTICSDIR/Statistics-acct* | sed -r 's/acct([1-9]{1})\.log$/acct0\1\.log/g' | sort | awk -F"acct" '{print $2}' | sed 's/\.log//g'`; do diff --git a/plugins/radiator/radiator_acct_ppm b/plugins/radiator/radiator_acct_ppm index 04ef8687..af8d74f6 100755 --- a/plugins/radiator/radiator_acct_ppm +++ b/plugins/radiator/radiator_acct_ppm @@ -23,11 +23,10 @@ if [ "$statisticsdir" ]; then STATISTICSDIR=$statisticsdir ; fi if [ "$1" = "autoconf" ]; then if [ -d ${STATISTICSDIR} ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi diff --git a/plugins/radiator/radiator_auth_lag b/plugins/radiator/radiator_auth_lag index 9b33b0d1..c8179286 100755 --- a/plugins/radiator/radiator_auth_lag +++ b/plugins/radiator/radiator_auth_lag @@ -23,11 +23,10 @@ if [ "$statisticsdir" ]; then STATISTICSDIR=$statisticsdir ; fi if [ "$1" = "autoconf" ]; then if [ -d ${STATISTICSDIR} ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi @@ -36,7 +35,7 @@ if [ "$1" = "config" ]; then echo 'graph_vlabel time' echo 'graph_args -l 0' echo 'graph_category auth' - echo 'graph_info This graph displayes AUTH response lag of all radiator processes combined.' + echo 'graph_info This graph displays AUTH response lag of all radiator processes combined.' # # Count all statistics files and create labels for COUNT in `ls $STATISTICSDIR/Statistics-auth* | sed -r 's/auth([1-9]{1})\.log$/auth0\1\.log/g' | sort | awk -F"auth" '{print $2}' | sed 's/\.log//g'`; do diff --git a/plugins/radiator/radiator_auth_ppm b/plugins/radiator/radiator_auth_ppm index 0e4c5d14..46dff61d 100755 --- a/plugins/radiator/radiator_auth_ppm +++ b/plugins/radiator/radiator_auth_ppm @@ -23,11 +23,10 @@ if [ "$statisticsdir" ]; then STATISTICSDIR=$statisticsdir ; fi if [ "$1" = "autoconf" ]; then if [ -d ${STATISTICSDIR} ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi @@ -37,7 +36,7 @@ if [ "$1" = "config" ]; then echo 'graph_vlabel packets per minute' echo 'graph_args -l 0' echo 'graph_category auth' - echo 'graph_info This graph displayes AUTH packets of all radiator processes combined.' + echo 'graph_info This graph displays AUTH packets of all radiator processes combined.' # # Count all statistics files and create labels for COUNT in `ls $STATISTICSDIR/Statistics-auth* | sed -r 's/auth([1-9]{1})\.log$/auth0\1\.log/g' | sort | awk -F"auth" '{print $2}' | sed 's/\.log//g'`; do diff --git a/plugins/raspberry-pi/cpu_freq_1sec b/plugins/raspberry-pi/cpu_freq_1sec index 62a26523..6bd0d6bf 100755 --- a/plugins/raspberry-pi/cpu_freq_1sec +++ b/plugins/raspberry-pi/cpu_freq_1sec @@ -1,8 +1,8 @@ #! /bin/sh -# -# This is a small supersampling plugin that does +# +# This is a small supersampling plugin that does # cpu sampling every 1 second. -# +# # (c) 2013 - LGPL - Steve Schnepp pluginfull="$0" # full name of plugin diff --git a/plugins/raspberry-pi/raspi_temp b/plugins/raspberry-pi/raspi_temp index 733271b7..5918b4e8 100755 --- a/plugins/raspberry-pi/raspi_temp +++ b/plugins/raspberry-pi/raspi_temp @@ -48,4 +48,4 @@ EOM exit 0;; esac -echo "temp.value `vcgencmd measure_temp | sed -n "s/temp=\(.*\)'C/\1/p"`" +echo "temp.value $(vcgencmd measure_temp | sed -n 's/temp=\(.*\)'"'"'C/\1/p')" diff --git a/plugins/raspberry-pi/rpi_temp b/plugins/raspberry-pi/rpi_temp index 8eff2283..3fe34b1f 100755 --- a/plugins/raspberry-pi/rpi_temp +++ b/plugins/raspberry-pi/rpi_temp @@ -1,11 +1,6 @@ #! 
/bin/sh # (c) 2013 - LGPL - Steve Schnepp -pluginfull="$0" # full name of plugin -plugin="${0##*/}" # name of plugin -pidfile="$MUNIN_PLUGSTATE/munin.$plugin.pid" -cache="$MUNIN_PLUGSTATE/munin.$plugin.value" - if [ ! -r "/sys/class/thermal/thermal_zone0/temp" ] then @@ -28,6 +23,6 @@ fi # values TEMP_MILLI_C=$(cat /sys/class/thermal/thermal_zone0/temp) -echo thermal_zone0.value $( echo "scale=3; $TEMP_MILLI_C / 1000" | bc ) +echo "thermal_zone0.value $(echo "$TEMP_MILLI_C" | awk '{printf("%0.3f", $1 / 1000)}')" exit 0 diff --git a/plugins/raspberry-pi/w1_ b/plugins/raspberry-pi/w1_ new file mode 100755 index 00000000..0d745538 --- /dev/null +++ b/plugins/raspberry-pi/w1_ @@ -0,0 +1,84 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 NAME + +w1_ - Plugin to monitor 1-wire temperature sensors (DS1820) + +=head1 CONFIGURATION + +The following environment variables are used by this plugin + + sensorid - Sensor to check. By default taken from command name. + t_warn - Warning limit for nagios notification + t_crit - Critical limit for nagios notification + + +=head1 AUTHOR + +Copyright (C) 2016 Roland Steinbach + +=head1 LICENSE + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 dated June, 1991. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +02110-1301 USA. + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest +=cut + +. $MUNIN_LIBDIR/plugins/plugin.sh + +case $0 in + *w1_*) + sensor_id=${0##*/w1_} + ;; +esac + +if [ "$1" = "suggest" ]; then + if [ -r /sys/bus/w1/devices ]; then + ls /sys/bus/w1/devices|grep -v bus_master + fi + exit 0 +fi + +if [ "$1" = "autoconf" ]; then + if [ -r /sys/bus/w1/devices ]; then + echo yes + exit 0 + else + echo "no (/sys/bus/w1/devices not found)" + exit 0 + fi +fi + +if [ "$1" = "config" ]; then + echo graph_title Temperature Sensor $sensor_id + echo 'graph_args --base 1000 -l 0' + echo 'graph_vlabel temperature (°C)' + echo 'graph_category sensors' + echo 'graph_info This graph shows 1-wire sensor temperature.' + echo "w1.label $sensor_id" + echo "w1.info Temperature at $sensor_id." + print_warning w1 + print_critical w1 + exit 0 +fi + + +sed -n '/t=/ s/.*t=//p' /sys/bus/w1/devices/$sensor_id/w1_slave | awk '{print "w1.value", $1/1000}' diff --git a/plugins/reddit_karma/reddit_karma_ b/plugins/reddit_karma/reddit_karma_ index b7389846..3d3ead68 100755 --- a/plugins/reddit_karma/reddit_karma_ +++ b/plugins/reddit_karma/reddit_karma_ @@ -3,7 +3,7 @@ # reddit_karma_ ########################## # Munin Plugin to track the karma activity of a Reddit user. -# +# # Copyright 2012 Mark Caudill # # This program is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ # (at your option) any later version. # # This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of +# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# @@ -32,7 +32,7 @@ reddit_user=${0##*reddit_karma_} ## if [ "$1" = "autoconf" ]; then # Check that curl is installed - if hash curl >/dev/null 2>&1; then + if command -v curl >/dev/null 2>&1; then echo "yes" else echo "no (no curl installed)" @@ -49,7 +49,7 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000' echo 'graph_scale no' echo 'graph_vlabel Link Karma' - echo 'graph_category other' + echo 'graph_category forum' echo 'comment_karma.label Comment Karma' echo 'comment_karma.draw LINE' echo 'link_karma.label Link Karma' @@ -61,8 +61,9 @@ fi # Main ## # Get current karma stats. -link_karma=$(curl -s http://www.reddit.com/user/${reddit_user}/about.json | grep -Eo 'link_karma": [0-9]+' | cut -d' ' -f2) -comment_karma=$(curl -s http://www.reddit.com/user/${reddit_user}/about.json | grep -Eo 'comment_karma": [0-9]+' | cut -d' ' -f2) +about_user_url="http://www.reddit.com/user/${reddit_user}/about.json" +link_karma=$(curl -s "$about_user_url" | grep -Eo 'link_karma": [0-9]+' | cut -d' ' -f2) +comment_karma=$(curl -s "$about_user_url" | grep -Eo 'comment_karma": [0-9]+' | cut -d' ' -f2) # Output karma stats. echo "link_karma.value $link_karma" diff --git a/plugins/redis/redis_ b/plugins/redis/redis_ index 55474435..fd0fc73c 100755 --- a/plugins/redis/redis_ +++ b/plugins/redis/redis_ @@ -59,7 +59,7 @@ if ( $autoconf ) { my $suggest = ( defined $ARGV[0] and $ARGV[0] eq "suggest" ); if ( $suggest ) { if ( defined( $sock ) ) { - my @plugins = ('connected_clients', 'key_ratio', 'keys_per_sec', 'per_sec', 'used_keys', 'used_memory'); + my @plugins = ('connected_clients', 'key_ratio', 'keys_per_sec', 'per_sec', 'repl_backlog_size', 'repl_lag', 'used_keys', 'used_memory'); foreach my $plugin (@plugins) { print "$plugin\n"; } @@ -128,7 +128,7 @@ switch ($0) { print "missratio.draw STACK\n"; exit 0; } - + my $total = $hash->{'keyspace_hits'} + $hash->{'keyspace_misses'}; my $hitratio = 0; my $missratio = 0; @@ -143,7 +143,7 @@ switch ($0) { case "per_sec" { if ( $config ) { - print "graph_title ${TITLE_PREFIX}Per second\n"; + print "graph_title ${TITLE_PREFIX}Requests Per second\n"; print "graph_vlabel per \${graph_period}\n"; print "graph_category search\n"; print "graph_args -l 0\n"; @@ -158,6 +158,34 @@ switch ($0) { print "connections.value ". $hash->{'total_connections_received'} ."\n"; } + case "repl_backlog_size" { + if ( $config ) { + print "graph_title ${TITLE_PREFIX}replication backlog\n"; + print "graph_vlabel replication backlog\n"; + print "graph_category search\n"; + print "graph_args -l 0\n"; + print "repl_backlog_size.label bytes behind master\n"; + exit 0; + } + + print "repl_backlog_size.value " . $hash->{'repl_backlog_size'} . "\n"; + } + + case "repl_lag" { + if ( $config ) { + print "graph_title ${TITLE_PREFIX}replication lag\n"; + print "graph_vlabel replication lag\n"; + print "graph_category search\n"; + print "graph_args -l 0\n"; + print "repl_backlog_size.label amount behind master\n"; + exit 0; + } + + if ($hash->{slave0} =~ /lag=(\d+)/) { + print "repl_backlog_size.value " . $1 . "\n"; + } + } + case "used_memory" { if ( $config ) { @@ -177,7 +205,7 @@ switch ($0) { print "used_memory_rss.value ". $hash->{'used_memory_rss'} ."\n"; print "used_memory_peak.value ". 
$hash->{'used_memory_peak'} ."\n"; } - + case "used_keys" { my $dbs; foreach my $key (keys %{$hash}) { @@ -210,18 +238,18 @@ switch ($0) { close ($sock); sub get_conn { - + my $sock; - + if( $UNIX_SOCKET && -S $UNIX_SOCKET ){ - + $sock = IO::Socket::UNIX->new( Type => SOCK_STREAM(), Peer => $UNIX_SOCKET, ); - + }else{ - + $sock = IO::Socket::INET->new( PeerAddr => $HOST, PeerPort => $PORT, @@ -229,7 +257,7 @@ sub get_conn { Proto => 'tcp' ); } - + if ( defined( $PASSWORD ) ) { print $sock "AUTH ", $PASSWORD, "\r\n"; my $result = <$sock> || die "can't read socket: $!"; diff --git a/plugins/redis/resque b/plugins/redis/resque index 23b75c73..c1a0dc58 100755 --- a/plugins/redis/resque +++ b/plugins/redis/resque @@ -120,8 +120,8 @@ elsif ($opt eq 'queues_size') { my $name = $queue; $name =~ s/:/_/; - my $size = $r->llen("${NAMESPACE}:queue:${queue}") || 0; - + my $size = $r->llen("${NAMESPACE}:queue:${queue}") || 0; + $total += $size; print "${name}_size.value ${size}\n"; diff --git a/plugins/relayd/relayd b/plugins/relayd/relayd index 8f1e1147..e43400c2 100755 --- a/plugins/relayd/relayd +++ b/plugins/relayd/relayd @@ -68,13 +68,13 @@ my $cmd = (defined($ARGV[0])) ? $ARGV[0] : ''; my @hosts = (); open(my $conf, "<", $configfile) or die "can't open $configfile: $!"; my $content = join("", <$conf>); -while ( $content =~ /table\s*<([^>]+)>\s*{([^}]+)}/g) { +while ( $content =~ /^\s*table\s*<([^>]+)>\s*{([^}]+)}/mg) { my $hosts = $2; print "table: $1, " if $Munin::Plugin::DEBUG; $hosts =~ s/#.*$//mg; # comments $hosts =~ s/^\s+//mg; # trim spaces before lines print "hosts: $hosts\n" if $Munin::Plugin::DEBUG; - push @hosts , split /\s+/, $hosts; + push @hosts , split /,?\s+/, $hosts; } if ($cmd eq 'config') { @@ -82,7 +82,7 @@ if ($cmd eq 'config') { print("graph_title Relayd host availability\n"); print("graph_args --upper-limit 100\n"); print("graph_vlabel % availability\n"); - print("graph_category Load balancer\n"); + print("graph_category loadbalancer\n"); print("graph_info Ratio of time when this host was up. This is provided by relayd itself (not averaged by this plugin)\n"); for my $host (@hosts) { my $clean = clean_host($host); @@ -92,7 +92,7 @@ if ($cmd eq 'config') { print("graph_title Relayd host incidents\n"); print("graph_args --lower-limit 0\n"); print("graph_vlabel down incidents\n"); - print("graph_category Load balancer\n"); + print("graph_category loadbalancer\n"); print("graph_info Number of times this host went down\n"); for my $host (@hosts) { my $clean = clean_host($host); diff --git a/plugins/rethinkdb/rethinkdb_node_io b/plugins/rethinkdb/rethinkdb_node_io old mode 100644 new mode 100755 index 91893568..a81f795b --- a/plugins/rethinkdb/rethinkdb_node_io +++ b/plugins/rethinkdb/rethinkdb_node_io @@ -12,7 +12,7 @@ host. Patches which remove this limitation are very welcome. If your port / host is somewhat else than the default - localhost:28015, and/or your database-server differes in name from + localhost:28015, and/or your database-server differs in name from `hostname` (short hostname), you can add rethinkdb-node-io config vars like: [rethinkdb_*] @@ -80,7 +80,7 @@ def print_config(servername): def check_autoconf() -> bool: - # this might be too easy, but gonna try. + # this might be too easy, but it is a good indication. 
if which("rethinkdb"): return True return False diff --git a/plugins/riak/riak_fsm_time_95 b/plugins/riak/riak_fsm_time_95 old mode 100644 new mode 100755 diff --git a/plugins/router/ag241-adsl b/plugins/router/ag241-adsl index de0d11ee..e4063276 100755 --- a/plugins/router/ag241-adsl +++ b/plugins/router/ag241-adsl @@ -1,51 +1,55 @@ -#!/usr/bin/ruby -# munin plugin to retrieve connection statistics from the web admin interface -# on a Linksys AG241v2 ADSL modem -# Makes use of the http://modemaddress/ADSLCStatus.htm page +#!/usr/bin/env ruby -#This plugin has only been tested on a Debian testing system +=begin -# This modem also has some basic SNMP support so you can configure it -# as per the instructions on the munin wiki -# http://munin.projects.linpro.no/wiki/Using_SNMP_plugins -# By default the SNMP server is disabled, you can enable it in the web admin -# You will need to set up the "virtual node" configuration as detailed -# for snmp plugins +munin plugin to retrieve connection statistics from the web admin interface +on a Linksys AG241v2 ADSL modem +Makes use of the http://modemaddress/ADSLCStatus.htm page -# Plugin will require some configuration in /etc/munin/plugin-conf.d/ag241_MODEMADDRESS -# e.g. -# [ag241_vocume.stargate_*] -# env.user admin -# env.pass password -# #env.port 80 +This plugin has only been tested on a Debian testing system -# Once you have the above config set you will need to symlink the plugin to -# /etc/munin/plugins/ag241_MODEMADDRESS_syncrate -# /etc/munin/plugins/ag241_MODEMADDRESS_attenutation -# /etc/munin/plugins/ag241_MODEMADDRESS_noise -# now restart munin-node. -# hopefully in 20-30mins you will have some nice graphs +This modem also has some basic SNMP support so you can configure it +as per the instructions on the munin wiki +http://munin.projects.linpro.no/wiki/Using_SNMP_plugins +By default the SNMP server is disabled, you can enable it in the web admin +You will need to set up the "virtual node" configuration as detailed +for snmp plugins +Plugin will require some configuration in /etc/munin/plugin-conf.d/ag241_MODEMADDRESS +e.g. +[ag241_vocume.stargate_*] +env.user admin +env.pass password +#env.port 80 -#Some magical munin foo... +Once you have the above config set you will need to symlink the plugin to +/etc/munin/plugins/ag241_MODEMADDRESS_syncrate +/etc/munin/plugins/ag241_MODEMADDRESS_attenutation +/etc/munin/plugins/ag241_MODEMADDRESS_noise +now restart munin-node. +hopefully in 20-30mins you will have some nice graphs + +Some magical munin foo... #%# family=manual #%# capabilities= +=end + # Require this module, it is part of the standard ruby lib AFAIK require 'net/http' -#default parameters -host = nil +# default parameters +host = nil port = ENV['port'] || 80 user = ENV['user'] || 'admin' -pass = ENV['pass'] || 'forhax' #dont remember what the default admin password was +pass = ENV['pass'] || 'forhax' # don't remember what the default admin password was stat = nil -# Check executeable "name" for parameter count +# Check executable "name" for parameter count params = $0.split('_') if params.size != 3 - puts "Incorrect number of parameters" - exit 1 + puts 'Incorrect number of parameters' + exit 1 end # first param after the plugin name is the host to query @@ -54,79 +58,79 @@ host = params[1] stat = params[2] unless ENV['debug'].nil? 
- puts "user = "+ user - puts "pass = "+ pass - puts "host = "+ host - puts "port = "+ port - puts "stat = "+ stat + puts 'user = ' + user + puts 'pass = ' + pass + puts 'host = ' + host + puts 'port = ' + port + puts 'stat = ' + stat end # Dump the graph configuration data if ARGV[0] == 'config' - puts 'host_name ' + host - puts 'graph_category network' + puts 'host_name ' + host + puts 'graph_category network' - case stat - when 'syncrate' - puts 'graph_info This graph shows the ADSL line sync rate.' - puts 'graph_title ADSL line sync rate' - puts 'graph_vlabel connection rate bits / second' - puts 'graph_args --base 1000 -l 0 ' - when 'attenuation' - puts 'graph_info This graph shows the ADSL line attenuation.' - puts 'graph_title ADSL line attenuation' - puts 'graph_vlabel attenuation dB' - when 'margin','noise' - puts 'graph_info This graph shows the ADSL SNR margin.' - puts 'graph_title ADSL line SNR margin' - puts 'graph_vlabel noise margin dB' - end - puts 'down.label downstream' - puts 'up.label upstream' - exit 0 + case stat + when 'syncrate' + puts 'graph_info This graph shows the ADSL line sync rate.' + puts 'graph_title ADSL line sync rate' + puts 'graph_vlabel connection rate bits / second' + puts 'graph_args --base 1000 -l 0 ' + when 'attenuation' + puts 'graph_info This graph shows the ADSL line attenuation.' + puts 'graph_title ADSL line attenuation' + puts 'graph_vlabel attenuation dB' + when 'margin', 'noise' + puts 'graph_info This graph shows the ADSL SNR margin.' + puts 'graph_title ADSL line SNR margin' + puts 'graph_vlabel noise margin dB' + end + puts 'down.label downstream' + puts 'up.label upstream' + exit 0 end # Connect to the webadmin -http = Net::HTTP.start(host,port) +http = Net::HTTP.start(host, port) req = Net::HTTP::Get.new('/ADSLCStatus.htm') # send the login info req.basic_auth user, pass response = http.request(req) s = response.body -#Make sure we got the page successfully +# Make sure we got the page successfully if response.code != '200' - puts "Getting web page failed:" - case response.code - when '401' - puts 'Probably because the username and password are incorrect' - #Looks like the modem respons with 200 when you try to access a page that doesnt exist >_> - #when '404' - # puts 'Looks like the page this plugin needs isn\'t available...' - # puts 'Check your modem make/model/version' - end - puts s - exit 1 + puts 'Getting web page failed:' + case response.code + when '401' + puts 'Probably because the username and password are incorrect' + # Looks like the modem response with 200 when you try to access a page that does not exist >_> + # when '404' + # puts 'Looks like the page this plugin needs isn\'t available...' + # puts 'Check your modem make/model/version' + end + puts s + exit 1 end # Apply voodoo regex to the result HTML to get the data we want. 
case stat - when 'syncrate' - a = s.scan(/.*share\.curstate.*\n.*share\.downstr[^0-9]*([0-9]+).*share\.upstr[^0-9]*([0-9]+).*$/) - b,c = a[0] - puts 'down.value '+ (b.to_i*1000).to_s + "\n" + 'up.value '+ (c.to_i*1000).to_s - exit 0 - when 'attenuation' - a = s.scan(/.*share\.lineatt.*\n.*share\.down[^0-9]*([0-9]+).*share\.up[^0-9]*([0-9]+).*$/) - b,c = a[0] - puts 'down.value '+ (b.to_i).to_s + "\n" + 'up.value '+ (c.to_i).to_s - exit 0 - when 'margin','noise' - a = s.scan(/.*share\.noise.*\n.*share\.down[^0-9]*([0-9]+).*share\.up[^0-9]*([0-9]+).*$/) - b,c = a[0] - puts 'down.value '+ (b.to_i).to_s + "\n" + 'up.value '+ (c.to_i).to_s - exit 0 - else - puts 'Statistic ' + stat.to_s + ' not known, would you like me to fabricate it for you?' - exit 1 +when 'syncrate' + a = s.scan(/.*share\.curstate.*\n.*share\.downstr[^0-9]*([0-9]+).*share\.upstr[^0-9]*([0-9]+).*$/) + b, c = a[0] + puts 'down.value ' + (b.to_i * 1000).to_s + "\n" + 'up.value ' + (c.to_i * 1000).to_s + exit 0 +when 'attenuation' + a = s.scan(/.*share\.lineatt.*\n.*share\.down[^0-9]*([0-9]+).*share\.up[^0-9]*([0-9]+).*$/) + b, c = a[0] + puts 'down.value ' + b.to_i.to_s + "\n" + 'up.value ' + c.to_i.to_s + exit 0 +when 'margin', 'noise' + a = s.scan(/.*share\.noise.*\n.*share\.down[^0-9]*([0-9]+).*share\.up[^0-9]*([0-9]+).*$/) + b, c = a[0] + puts 'down.value ' + b.to_i.to_s + "\n" + 'up.value ' + c.to_i.to_s + exit 0 +else + puts 'Statistic ' + stat.to_s + ' not known, would you like me to fabricate it for you?' + exit 1 end diff --git a/plugins/router/arris-tg3442 b/plugins/router/arris-tg3442 new file mode 100755 index 00000000..604807e1 --- /dev/null +++ b/plugins/router/arris-tg3442 @@ -0,0 +1,270 @@ +#!/usr/bin/env python3 + +""" +=head1 NAME + +arris - MUNIN Plugin to monitor status of Arris TG3442 / TG2492LG-85 + and compatible cable modems + +=head1 DESCRIPTION +Connect to the web-frontend and get current DOCSIS status of upstream and +downstream channels. (Signal Power, SNR, Lock Status) + + +=head1 REQUIREMENTS +- BeautifulSoup +- pycryptodome + + +=head1 CONFIGURATION + +=head2 Example +[arris] +env.url http://192.168.100.1 +env.username admin +env.password yourpassword + + +=head2 Parameters +url - URL to web-frontend +username - defaults to "admin" +password - valid password + + +=head1 REFERENCES +https://www.arris.com/products/touchstone-tg3442-cable-voice-gateway/ + + +=head1 AUTHOR + + Copyright (c) 2019 Daniel Hiepler + Copyright (c) 2004-2009 Nicolas Stransky + Copyright (c) 2018 Lars Kruse + + +=head1 LICENSE + Permission to use, copy, and modify this software with or without fee + is hereby granted, provided that this entire notice is included in + all source code copies of any software which is or includes a copy or + modification of this software. + + THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR + IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY + REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE + MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR + PURPOSE. 
+ + +=head1 MAGIC MARKERS + + #%# family=contrib + +=cut +""" + +import binascii +from bs4 import BeautifulSoup +from Crypto.Cipher import AES +import hashlib +import json +import re +import requests +import sys +import os + + +""" +The CREDENTIAL_COOKIE below equals the following: +base64.encodebytes(b'{ "unique":"280oaPSLiF", "family":"852", "modelname":"TG2492LG-85", ' + '"name":"technician", "tech":true, "moca":0, "wifi":5, "conType":"WAN", ' + '"gwWan":"f", "DefPasswdChanged":"YES" }').decode() +""" +CREDENTIAL_COOKIE = "eyAidW5pcXVlIjoiMjgwb2FQU0xpRiIsICJmYW1pbHkiOiI4NTIiLCAibW9kZWxuYW1lIjoiVEcy"\ + "NDkyTEctODUiLCAibmFtZSI6InRlY2huaWNpYW4iLCAidGVjaCI6dHJ1ZSwgIm1vY2EiOjAsICJ3"\ + "aWZpIjo1LCAiY29uVHlwZSI6IldBTiIsICJnd1dhbiI6ImYiLCAiRGVmUGFzc3dkQ2hhbmdlZCI6"\ + "IllFUyIgfQ==" + + +def login(session, url, username, password): + """login to """ + # get login page + r = session.get(f"{url}") + # parse HTML + h = BeautifulSoup(r.text, "lxml") + # get session id from javascript in head + current_session_id = re.search(r".*var currentSessionId = '(.+)';.*", h.head.text)[1] + + # encrypt password + salt = os.urandom(8) + iv = os.urandom(8) + key = hashlib.pbkdf2_hmac( + 'sha256', + bytes(password.encode("ascii")), + salt, + iterations=1000, + dklen=128 / 8 + ) + secret = {"Password": password, "Nonce": current_session_id} + plaintext = bytes(json.dumps(secret).encode("ascii")) + associated_data = "loginPassword" + # initialize cipher + cipher = AES.new(key, AES.MODE_CCM, iv) + # set associated data + cipher.update(bytes(associated_data.encode("ascii"))) + # encrypt plaintext + encrypt_data = cipher.encrypt(plaintext) + # append digest + encrypt_data += cipher.digest() + # return + login_data = { + 'EncryptData': binascii.hexlify(encrypt_data).decode("ascii"), + 'Name': username, + 'Salt': binascii.hexlify(salt).decode("ascii"), + 'Iv': binascii.hexlify(iv).decode("ascii"), + 'AuthData': associated_data + } + + # login + r = session.put( + f"{url}/php/ajaxSet_Password.php", + headers={ + "Content-Type": "application/json", + "csrfNonce": "undefined" + }, + data=json.dumps(login_data) + ) + + # parse result + result = json.loads(r.text) + # success? 
+ if result['p_status'] == "Fail": + print("login failure", file=sys.stderr) + exit(-1) + # remember CSRF nonce + csrf_nonce = result['nonce'] + + # prepare headers + session.headers.update({ + "X-Requested-With": "XMLHttpRequest", + "csrfNonce": csrf_nonce, + "Origin": f"{url}/", + "Referer": f"{url}/" + }) + # set credentials cookie + session.cookies.set("credential", CREDENTIAL_COOKIE) + + # set session + r = session.post(f"{url}/php/ajaxSet_Session.php") + + +def docsis_status(session): + """get current DOCSIS status page, parse and return channel data""" + r = session.get(f"{url}/php/status_docsis_data.php") + # extract json from javascript + json_downstream_data = re.search(r".*json_dsData = (.+);.*", r.text)[1] + json_upstream_data = re.search(r".*json_usData = (.+);.*", r.text)[1] + # parse json + downstream_data = json.loads(json_downstream_data) + upstream_data = json.loads(json_upstream_data) + # convert lock status to numeric values + for d in [upstream_data, downstream_data]: + for c in d: + if c['LockStatus'] == "ACTIVE" or c['LockStatus'] == "Locked": + c['LockStatus'] = 1 + else: + c['LockStatus'] = 0 + return downstream_data, upstream_data + + +# ----------------------------------------------------------------------------- +if __name__ == "__main__": + # get config + url = os.getenv("url") + username = os.getenv("username") + password = os.getenv("password") + # validate config + if not url or not username or not password: + print("Set url, username and password first.", file=sys.stderr) + exit(1) + # create session + session = requests.Session() + # login with username and password + login(session, url, username, password) + # get DOCSIS status + downstream, upstream = docsis_status(session) + # prepare munin graph info + graph_descriptions = [ + { + "name": "up_signal", + "title": "DOCSIS Upstream signal strength", + "vlabel": "dBmV", + "info": "DOCSIS upstream signal strength by channel", + "data": upstream, + "key": "PowerLevel" + }, + { + "name": "up_lock", + "title": "DOCSIS Upstream lock", + "vlabel": "locked", + "info": "DOCSIS upstream channel lock status", + "data": upstream, + "key": "LockStatus" + }, + { + "name": "down_signal", + "title": "DOCSIS Downstream signal strength", + "vlabel": "dBmV", + "info": "DOCSIS downstream signal strength by channel", + "data": downstream, + "key": "PowerLevel" + }, + { + "name": "down_lock", + "title": "DOCSIS Downstream lock", + "vlabel": "locked", + "info": "DOCSIS downstream channel lock status", + "data": downstream, + "key": "LockStatus" + }, + { + "name": "down_snr", + "title": "DOCSIS Downstream signal/noise ratio", + "vlabel": "dB", + "info": "SNR/MER", + "data": downstream, + "key": "SNRLevel" + } + ] + + # configure ? + if len(sys.argv) > 1 and "config" == sys.argv[1]: + # process all graphs + for g in graph_descriptions: + # graph config + print(f"multigraph docsis_{g['name']}") + print(f"graph_title {g['title']}") + print("graph_category network") + print(f"graph_vlabel {g['vlabel']}") + print(f"graph_info {g['info']}") + print("graph_scale no") + + # channels + for c in g['data']: + # only use channels with PowerLevel + if not c['PowerLevel']: + continue + info_text = f"Channel type: {c['ChannelType']}, Modulation: {c['Modulation']}" + print(f"channel_{c['ChannelID']}.label {c['ChannelID']} ({c['Frequency']} MHz)") + print(f"channel_{c['ChannelID']}.info {info_text}") + + # output values ? 
+ else: + # process all graphs + for g in graph_descriptions: + print(f"multigraph docsis_{g['name']}") + # channels + for c in g['data']: + # only use channels with PowerLevel + if not c['PowerLevel']: + continue + print(f"channel_{c['ChannelID']}.value {c[g['key']]}") diff --git a/plugins/router/avm-fritzbox-wan-traffic b/plugins/router/avm-fritzbox-wan-traffic index adda3a73..164e9faf 100755 --- a/plugins/router/avm-fritzbox-wan-traffic +++ b/plugins/router/avm-fritzbox-wan-traffic @@ -6,7 +6,7 @@ # # Author: Andreas Kreisl # -# Licence: Creative Commons - Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0) +# Licence: Creative Commons - Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0) # http://creativecommons.org/licenses/by-sa/3.0/ # ############################## diff --git a/plugins/router/bbox b/plugins/router/bbox new file mode 100755 index 00000000..0ae62a18 --- /dev/null +++ b/plugins/router/bbox @@ -0,0 +1,140 @@ +#! /bin/sh +# Parse bbox information - https://fr.wikipedia.org/wiki/Bbox +# (c) 2018 - GPLv2 - Steve Schnepp +# +# Configuration is done via ENV vars, here is the default : +# +# [bbox] +# env.IP 192.168.1.254 + + +# fail on error +set -e + +IP=${IP:-"192.168.1.254"} + +if [ "$1" = "config" ] +then + echo multigraph bbox_adsl_bw + echo graph_title BBox Adsl Bandwidth + echo graph_category network + echo up.label UpStream + echo down.label DownStream + + echo multigraph bbox_adsl_att + echo graph_title BBox Adsl Attenuation + echo graph_category network + echo up.label UpStream + echo down.label DownStream + + echo multigraph bbox_adsl_snr + echo graph_title BBox Adsl SignalNoise Ratio + echo graph_category network + echo up.label UpStream + echo down.label DownStream + + echo multigraph bbox_adsl_pkt + echo graph_title BBox Adsl Packets + echo graph_category network + echo graph_order up down up_f down_f up_c down_c up_u down_u + echo graph_vlabel packets/\${graph_period} + echo graph_args --base 1000 --logarithmic --lower-limit 0.001 + echo up.label UpStream + echo down.label DownStream + echo up_f.label UpStream "(FEC)" + echo down_f.label DownStream "(FEC)" + echo up_c.label UpStream "(CRC)" + echo down_c.label DownStream "(CEC)" + echo up_u.label UpStream "(HEC)" + echo down_u.label DownStream "(HEC)" + echo up.type DERIVE + echo down.type DERIVE + echo up_f.type DERIVE + echo down_f.type DERIVE + echo up_c.type DERIVE + echo down_c.type DERIVE + echo up_u.type DERIVE + echo down_u.type DERIVE + echo up.min 0 + echo down.min 0 + echo up_f.min 0 + echo down_f.min 0 + echo up_c.min 0 + echo down_c.min 0 + echo up_u.min 0 + echo down_u.min 0 + + echo multigraph bbox_adsl_bytes + echo graph_title BBox Adsl Usage + echo graph_category network + echo graph_order up down + echo graph_vlabel bytes/\${graph_period} + echo graph_args --base 1024 --lower-limit 0 + echo up.label UpStream + echo up.type DERIVE + echo down.label DownStream + echo down.type DERIVE + echo up.min 0 + echo down.min 0 + + echo multigraph bbox_adsl_uptime + echo graph_title BBox Adsl Uptime + echo graph_category network + echo graph_vlabel days + echo uptime.label Uptime + echo uptime.cdef uptime,3600,/,24/ + + exit 0 +fi + +TMPFILE="$(mktemp -d)" +trap 'rm -Rf "$TMPFILE"' EXIT + +cd "$TMPFILE" + +curl -s "http://$IP/api/v1/wan/ip" > "ip.json" & +curl -s "http://$IP/api/v1/wan/ip/stats" > "ip-stats.json" & +curl -s "http://$IP/api/v1/wan/xdsl" > "xdsl.json" & +curl -s "http://$IP/api/v1/wan/xdsl/stats" > "xdsl-stats.json" & + +wait + +if [ "$MUNIN_DEBUG" = 1 ] +then + for i in * + do + echo 
"========= $i ========" + cat "$i" + done +fi + +echo multigraph bbox_adsl_bw +echo up.value $(jq -r ".[].wan.xdsl.up.bitrates" < xdsl.json) +echo down.value $(jq -r ".[].wan.xdsl.down.bitrates" < xdsl.json) + +echo multigraph bbox_adsl_att +echo up.value $(jq -r ".[].wan.xdsl.up.attenuation" < xdsl.json) +echo down.value $(jq -r ".[].wan.xdsl.down.attenuation" < xdsl.json) + +echo multigraph bbox_adsl_snr +echo up.value $(jq -r ".[].wan.xdsl.up.noise" < xdsl.json) +echo down.value $(jq -r ".[].wan.xdsl.down.noise" < xdsl.json) + +echo multigraph bbox_adsl_pkt +echo up.value $(jq -r ".[].wan.ip.stats.tx.packets" < ip-stats.json) +echo down.value $(jq -r ".[].wan.ip.stats.rx.packets" < ip-stats.json) +echo up_f.value $(jq -r ".[].wan.xdsl.stats.remote_fec" < xdsl-stats.json) +echo down_f.value $(jq -r ".[].wan.xdsl.stats.local_fec" < xdsl-stats.json) +echo up_c.value $(jq -r ".[].wan.xdsl.stats.remote_hec" < xdsl-stats.json) +echo down_c.value $(jq -r ".[].wan.xdsl.stats.local_hec" < xdsl-stats.json) +echo up_u.value $(jq -r ".[].wan.xdsl.stats.remote_crc" < xdsl-stats.json) +echo down_u.value $(jq -r ".[].wan.xdsl.stats.local_crc" < xdsl-stats.json) + +echo multigraph bbox_adsl_bytes +echo up.value $(jq -r ".[].wan.ip.stats.tx.bytes" < ip-stats.json | tr -d '"') +echo down.value $(jq -r ".[].wan.ip.stats.rx.bytes" < ip-stats.json | tr -d '"') + +echo multigraph bbox_adsl_uptime +echo uptime.value $(jq -r ".[].wan.xdsl.showtime" < xdsl.json) + +exit 0 diff --git a/plugins/router/beboxstats b/plugins/router/beboxstats deleted file mode 100755 index 5f539724..00000000 --- a/plugins/router/beboxstats +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/perl -w - -use strict; - -my ($Args) = @ARGV; - -my $expecter = "/path/to/beboxstats.expect"; - -if ($Args) { - - # work out line to grab - if ($Args eq 'autoconf') { - # Check the expect script that polls the router exists - unless ( -e $expecter ) { - print "no (Can't find expect script. 
Check value of \$expecter: $expecter)\n"; - } else { - print "yes\n"; - } - - } elsif ($Args eq 'config') { # print out plugin parameters - printf(" -graph_title bebox line stats -graph_vlabel deciBels -graph_category network -graph_info This graph shows the various line parameters -attenuationdownstream.label Downstream Attenuation -attenuationupstream.label Upstream Attenuation -margindownstream.label Downstream Noise Margin -marginupstream.label Upstream Noise Margin -outputpowerdownstream.label Downstream Output Power -outputpowerupstream.label Upstream Output Power -margindownstream.type GAUGE -outputpowerupstream.type GAUGE -attenuationdownstream.type GAUGE -marginupstream.type GAUGE -outputpowerdownstream.type GAUGE -attenuationupstream.type GAUGE - "); - # .label is the Key on the graph - } else { - printf("Usage: $0 - No arguments: print line stats - autoconf: print 'yes' - config: print config info for Munin\n"); - } - -} else { - # if no arguments, just fetch the data and print it out - -my @insplitted = split(' ', `$expecter | grep dB`); - -print "margindownstream.value $insplitted[3]\n"; -print "marginupstream.value $insplitted[4]\n"; - -print "attenuationdownstream.value $insplitted[8]\n"; -print "attenuationupstream.value $insplitted[9]\n"; - -print "outputpowerdownstream.value $insplitted[13]\n"; -print "outputpowerupstream.value $insplitted[14]\n"; -} - - diff --git a/plugins/router/beboxstats.expect b/plugins/router/beboxstats.expect deleted file mode 100755 index c990f271..00000000 --- a/plugins/router/beboxstats.expect +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/expect -f - -# script to log on to a BeBox router [ST Speedtouch 780] and gather line stats - -# set timeout for response from router to 30 seconds -set timeout 30 -set router "host.or.ip.of.router" -set port "23" -set username "Administrator" -set password "routerpassword" - -# telnet to $router on $port -spawn telnet $router $port - -expect "Username :" -send "$username\r" - -expect "Password :" -send "$password\r" - -expect "}=>" -send "adsl info expand=enabled\r" - -expect "}=>" -send "exit\r" diff --git a/plugins/router/beboxsync b/plugins/router/beboxsync deleted file mode 100755 index 369ec8f6..00000000 --- a/plugins/router/beboxsync +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/perl -w - -# (C) Alex Dekker -# License is GPL - -use strict; - -my ($Args) = @ARGV; - -my $expecter = "/path/to/beboxstats.expect"; - -if ($Args) { - - # work out line to grab - if ($Args eq 'autoconf') { - # Check the expect script that polls the router exists - unless ( -e $expecter ) { - print "no (Can't find expect script. 
Check value of \$expecter: $expecter)\n"; - } else { - print "yes\n"; - } - } elsif ($Args eq 'config') { # print out plugin parameters - printf(" -graph_title bebox sync stats -graph_vlabel ATM kbps -graph_category network -graph_info This graph shows line sync speed -syncdownstream.label Downstream Sync Speed -syncupstream.label Upstream Sync Speed -syncdownstream.type GAUGE -syncupstream.type GAUGE - "); - # .label is the Key on the graph - } else { - printf("Usage: $0 - No arguments: print line stats - autoconf: print 'yes' - config: print config info for Munin\n"); - } - -} else { - # if no arguments, just fetch the data and print it out - -my @insplitted = split(' ', `$expecter | grep stream`); - -print "syncdownstream.value $insplitted[3]\n"; -print "syncupstream.value $insplitted[7]\n"; -} - - diff --git a/plugins/router/cisco-epc3010_ b/plugins/router/cisco-epc3010_ index e1b83638..d795587e 100755 --- a/plugins/router/cisco-epc3010_ +++ b/plugins/router/cisco-epc3010_ @@ -5,7 +5,7 @@ =head1 NAME -epc3010_ - munin-plugin to monitor Upstream/Downstream Power Levels and Signal to Noise Ratio on Cisco EPC3010 EuroDocsis 3.0 Data Modem +epc3010_ - munin-plugin to monitor Upstream/Downstream Power Levels and Signal to Noise Ratio on Cisco EPC3010 EuroDocsis 3.0 Data Modem =head1 CONFIGURATION @@ -43,9 +43,9 @@ DIRECTION=${0##*epc3010_} # Check if argument is autoconfig or config case "$1" in -autoconfig) +autoconfig) # Does your network have a Cisco EPC3010? - curl -s http://192.168.100.1/Docsis_system.asp | grep -q "Cisco EPC3010" + curl -s http://192.168.100.1/Docsis_system.asp | grep -q "Cisco EPC3010" if [ $? -eq 0 ]; then echo "yes" exit 0 @@ -54,7 +54,7 @@ autoconfig) exit 1 fi ;; -config) +config) if [ $DIRECTION == "downstream" ]; then printf "graph_title Cisco EPC3010 Downstream measurements\n"; printf "graph_args -l 0 --base 1000\n" diff --git a/plugins/router/conexant_adsl b/plugins/router/conexant_adsl index acf4b186..71ca6053 100755 --- a/plugins/router/conexant_adsl +++ b/plugins/router/conexant_adsl @@ -1,6 +1,6 @@ #!/bin/bash -# -# +# +# # Script to show adsl router stats for routers with Conexant based chips and the standard Conexant web admin gui like the eTec EpicRouter... 
# # Parameters understood: diff --git a/plugins/router/d-link-dir-655-router-statistics-plugin b/plugins/router/d-link-dir-655-router-statistics-plugin index 8c7923de..af8705a5 100755 --- a/plugins/router/d-link-dir-655-router-statistics-plugin +++ b/plugins/router/d-link-dir-655-router-statistics-plugin @@ -1,4 +1,4 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby # # Munin plugin for the D-link DIR-655 router # @@ -31,27 +31,26 @@ require 'mechanize' require 'digest/md5' require 'nokogiri' - def output - nics = Hash.new - nics["LAN"] = Hash.new - nics["WAN"] = Hash.new - nics["WLAN"] = Hash.new - password = ENV['router_password'] || "" - router_path = ENV['router_ip_address'] || "10.0.0.1" - router_path = "http://" + router_path + nics = {} + nics['LAN'] = {} + nics['WAN'] = {} + nics['WLAN'] = {} + password = ENV['router_password'] || '' + router_path = ENV['router_ip_address'] || '10.0.0.1' + router_path = 'http://' + router_path agent = Mechanize.new x = agent.get(router_path) salt = x.body.match(/salt = "(.*)"/)[1] - - # pad the pasword to length 16 + + # pad the password to length 16 pad_size = (16 - password.length) padded_password = password + "\x01" * pad_size - + # pad it the rest of the way, length 64 for user - salted_password = salt + padded_password + ("\x01" * (63 - salt.length - padded_password.length)) + "U" + salted_password = salt + padded_password + ("\x01" * (63 - salt.length - padded_password.length)) + 'U' login_hash = salt + Digest::MD5.hexdigest(salted_password) - + # authenticate against the router using the hash that we just built login_path = "#{router_path}/post_login.xml?hash=#{login_hash}" x = agent.get(login_path) @@ -62,88 +61,87 @@ def output doc.xpath('//interface').each do |interface| children = interface.children name = children.search('name')[0].text - nics[name]["packets_sent"] = children.search('packets_sent')[0].text - nics[name]["packets_received"] = children.search('packets_received')[0].text - nics[name]["tx_dropped"] = children.search('tx_dropped')[0].text + nics[name]['packets_sent'] = children.search('packets_sent')[0].text + nics[name]['packets_received'] = children.search('packets_received')[0].text + nics[name]['tx_dropped'] = children.search('tx_dropped')[0].text begin - nics[name]["tx_collisions"] = children.search('tx_collisions')[0].text + nics[name]['tx_collisions'] = children.search('tx_collisions')[0].text rescue Exception - nics[name]["tx_collisions"] = "0" + nics[name]['tx_collisions'] = '0' end - nics[name]["rx_dropped"] = children.search('rx_dropped')[0].text - nics[name]["rx_errors"] = children.search('rx_errors')[0].text + nics[name]['rx_dropped'] = children.search('rx_dropped')[0].text + nics[name]['rx_errors'] = children.search('rx_errors')[0].text end # get wifi associations and print out info for munin graph - puts "multigraph clients" + puts 'multigraph clients' clients_xml = agent.get("#{router_path}/wifi_assoc.xml").body j = 0 doc = Nokogiri::XML(clients_xml.to_s) - doc.xpath('//assoc').each do |assoc| - j+=1 + doc.xpath('//assoc').each do |_assoc| + j += 1 end - puts "wifi_assoc.value " + j.to_s + puts 'wifi_assoc.value ' + j.to_s # get dhcp clients and print out info for munin graph clients_xml = agent.get("#{router_path}/dhcp_clients.xml").body j = 0 doc = Nokogiri::XML(clients_xml.to_s) - doc.xpath('//client').each do |client| - j+=1 + doc.xpath('//client').each do |_client| + j += 1 end - puts "dhcp_clients.value " + j.to_s + puts 'dhcp_clients.value ' + j.to_s - puts "multigraph uptime" + puts 'multigraph uptime' # get 
uptime of connection clients_xml = agent.get("#{router_path}/wan_connection_status.xml").body doc = Nokogiri::XML(clients_xml.to_s) uptime = doc.children.search('wan_interface_up_time_0')[0].text - puts "uptime.value " + sprintf( "%.2f", (Float(uptime)/86400) ) + puts 'uptime.value ' + format('%.2f', (Float(uptime) / 86_400)) # graph overall interface packets transferred per interval - puts "multigraph if_packets" - for i in [ "LAN", "WAN", "WLAN" ] do - puts "#{i}_recv.value " + nics[i]["packets_received"] - puts "#{i}_send.value " + nics[i]["packets_sent"] + puts 'multigraph if_packets' + %w[LAN WAN WLAN].each do |i| + puts "#{i}_recv.value " + nics[i]['packets_received'] + puts "#{i}_send.value " + nics[i]['packets_sent'] end # graph overall interface dropped packets per interval - puts "multigraph if_drop" - for i in [ "LAN", "WAN", "WLAN" ] do - puts "#{i}_recv.value " + nics[i]["rx_dropped"] - puts "#{i}_send.value " + nics[i]["tx_dropped"] + puts 'multigraph if_drop' + %w[LAN WAN WLAN].each do |i| + puts "#{i}_recv.value " + nics[i]['rx_dropped'] + puts "#{i}_send.value " + nics[i]['tx_dropped'] end # graph overall interface collisions & errors per interval - puts "multigraph if_collerr" - for i in [ "LAN", "WAN", "WLAN" ] do - puts "#{i}_coll.value " + nics[i]["tx_collisions"] - puts "#{i}_err.value " + nics[i]["rx_errors"] + puts 'multigraph if_collerr' + %w[LAN WAN WLAN].each do |i| + puts "#{i}_coll.value " + nics[i]['tx_collisions'] + puts "#{i}_err.value " + nics[i]['rx_errors'] end - + # graph stats for each interface - for i in [ "LAN", "WAN", "WLAN" ] do + %w[LAN WAN WLAN].each do |i| puts "multigraph if_packets.#{i}" - puts "send.value " + nics[i]["packets_sent"] - puts "recv.value " + nics[i]["packets_received"] + puts 'send.value ' + nics[i]['packets_sent'] + puts 'recv.value ' + nics[i]['packets_received'] puts "multigraph if_drop.#{i}" - puts "send.value " + nics[i]["tx_dropped"] - puts "recv.value " + nics[i]["rx_dropped"] + puts 'send.value ' + nics[i]['tx_dropped'] + puts 'recv.value ' + nics[i]['rx_dropped'] puts "multigraph if_collerr.#{i}" - puts "coll.value " + nics[i]["tx_collisions"] - puts "err.value " + nics[i]["rx_errors"] + puts 'coll.value ' + nics[i]['tx_collisions'] + puts 'err.value ' + nics[i]['rx_errors'] end end - def config # build the configuration for graphs - puts "multigraph if_packets" + puts 'multigraph if_packets' puts 'graph_title D-Link DIR-655 interface traffic' puts 'graph_category network' puts 'graph_order LAN_recv LAN_send WAN_recv WAN_send WLAN_recv WLAN_send' puts 'graph_vlabel packets in (-) / out (+) per ${graph_period}' - for i in [ "LAN", "WAN", "WLAN" ] do + %w[LAN WAN WLAN].each do |i| puts "#{i}_recv.type DERIVE" puts "#{i}_recv.graph no" puts "#{i}_recv.min 0" @@ -153,12 +151,12 @@ def config puts "#{i}_send.min 0" end - puts "multigraph if_drop" + puts 'multigraph if_drop' puts 'graph_title D-Link DIR-655 interface drops' puts 'graph_category network' puts 'graph_order LAN_recv LAN_send WAN_recv WAN_send WLAN_recv WLAN_send' puts 'graph_vlabel packets / ${graph_period}' - for i in [ "LAN", "WAN", "WLAN" ] do + %w[LAN WAN WLAN].each do |i| puts "#{i}_recv.type DERIVE" puts "#{i}_recv.graph no" puts "#{i}_recv.min 0" @@ -168,12 +166,12 @@ def config puts "#{i}_send.min 0" end - puts "multigraph if_collerr" + puts 'multigraph if_collerr' puts 'graph_title D-Link DIR-655 interface collisions & errors' puts 'graph_category network' puts 'graph_order LAN_coll LAN_err WAN_coll WAN_err WLAN_coll WLAN_coll' puts 'graph_vlabel 
packets / ${graph_period}' - for i in [ "LAN", "WAN", "WLAN" ] do + %w[LAN WAN WLAN].each do |i| puts "#{i}_coll.label #{i} collisions" puts "#{i}_coll.type DERIVE" puts "#{i}_coll.min 0" @@ -182,26 +180,26 @@ def config puts "#{i}_err.min 0" end - puts "multigraph clients" - puts "graph_title D-Link DIR-655 client information" - puts "graph_category system" - puts "graph_order dhcp_clients wifi_assoc" - puts "graph_vlabel number of clients" - puts "dhcp_clients.label DHCP clients" - puts "dhcp_clients.type GAUGE" - puts "dhcp_clients.min 0" - puts "wifi_assoc.label wifi clients" - puts "wifi_assoc.type GAUGE" - puts "wifi_assoc.min 0" + puts 'multigraph clients' + puts 'graph_title D-Link DIR-655 client information' + puts 'graph_category system' + puts 'graph_order dhcp_clients wifi_assoc' + puts 'graph_vlabel number of clients' + puts 'dhcp_clients.label DHCP clients' + puts 'dhcp_clients.type GAUGE' + puts 'dhcp_clients.min 0' + puts 'wifi_assoc.label wifi clients' + puts 'wifi_assoc.type GAUGE' + puts 'wifi_assoc.min 0' - puts "multigraph uptime" - puts "graph_title Uptime" + puts 'multigraph uptime' + puts 'graph_title Uptime' puts 'graph_vlabel uptime in days' puts 'graph_category system' puts 'uptime.label uptime' puts 'uptime.draw AREA' - for i in [ "LAN", "WAN", "WLAN" ] do + %w[LAN WAN WLAN].each do |i| puts "multigraph if_packets.#{i}" puts "graph_title D-Link DIR-655 #{i} traffic" puts 'graph_category network' @@ -215,7 +213,7 @@ def config puts 'send.type DERIVE' puts 'send.negative recv' puts 'send.min 0' - + puts "multigraph if_drop.#{i}" puts "graph_title D-Link DIR-655 #{i} drops" puts 'graph_category network' @@ -229,7 +227,7 @@ def config puts 'send.type DERIVE' puts 'send.negative recv' puts 'send.min 0' - + puts "multigraph if_collerr.#{i}" puts "graph_title D-Link DIR-655 #{i} collisions & errors" puts 'graph_category network' @@ -244,10 +242,9 @@ def config end end - # main -if ARGV.length == 1 and ARGV[0] == 'config' - config() +if (ARGV.length == 1) && (ARGV[0] == 'config') + config else - output() + output end diff --git a/plugins/router/dsl-connection-speed b/plugins/router/dsl-connection-speed index 06984f1e..eb5a03c4 100755 --- a/plugins/router/dsl-connection-speed +++ b/plugins/router/dsl-connection-speed @@ -28,7 +28,7 @@ fi # create temp file for storing wget output TMPFILE=$(mktemp) -# if we have auth variables then add them to +# if we have auth variables then add them to # wget cmdline if [[ "$DSLUSER" != "" && "$DSLPASS" != "" ]] then diff --git a/plugins/router/dsl-stats b/plugins/router/dsl-stats index 58b4bf20..cfdeaf9c 100755 --- a/plugins/router/dsl-stats +++ b/plugins/router/dsl-stats @@ -41,7 +41,7 @@ fi TMPFILE=$(mktemp) -# if we have auth variables then add them to +# if we have auth variables then add them to # wget cmdline if [[ "$DSLUSER" != "" && "$DSLPASS" != "" ]] then diff --git a/plugins/router/example-graphs/freebox_adsl-day.png b/plugins/router/example-graphs/freebox_adsl-day.png new file mode 100644 index 00000000..98ac66fc Binary files /dev/null and b/plugins/router/example-graphs/freebox_adsl-day.png differ diff --git a/plugins/router/example-graphs/freebox_adsl_errors-day.png b/plugins/router/example-graphs/freebox_adsl_errors-day.png new file mode 100644 index 00000000..d823874b Binary files /dev/null and b/plugins/router/example-graphs/freebox_adsl_errors-day.png differ diff --git a/plugins/router/example-graphs/freebox_traffic-day.png b/plugins/router/example-graphs/freebox_traffic-day.png new file mode 100644 index 
00000000..1c5567a4 Binary files /dev/null and b/plugins/router/example-graphs/freebox_traffic-day.png differ diff --git a/plugins/router/example-graphs/freebox_uptime-day.png b/plugins/router/example-graphs/freebox_uptime-day.png new file mode 100644 index 00000000..1faa3cbb Binary files /dev/null and b/plugins/router/example-graphs/freebox_uptime-day.png differ diff --git a/plugins/router/example-graphs/freebox_users-day.png b/plugins/router/example-graphs/freebox_users-day.png new file mode 100644 index 00000000..6428a9d2 Binary files /dev/null and b/plugins/router/example-graphs/freebox_users-day.png differ diff --git a/plugins/router/freebox b/plugins/router/freebox new file mode 100755 index 00000000..9cb6c16b --- /dev/null +++ b/plugins/router/freebox @@ -0,0 +1,491 @@ +#!/bin/bash -u +# -*- sh -*- + +: << =cut + +=head1 NAME + +freebox - Plugin to monitor stats of a Freebox (Free.fr's custom series of routers) + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 CAVEAT + +Only tested on a Freebox v5 with an ADSL uplink. + +=head1 AUTHOR + +Olivier Mehani + +Copyright (C) 2019 Olivier Mehani + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +=cut + +# shellcheck disable=SC1090 +. "${MUNIN_LIBDIR}/plugins/plugin.sh" + +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x +fi + +fbx_info_url="http://mafreebox.free.fr/pub/fbx_info.txt" +# +# Example output (including stray whitespaces): +# + +# ______________________________________________________________________ +# +# Etat de la Freebox +# ______________________________________________________________________ +# +# +# Informations générales : +# ======================== +# +# Modèle Freebox ADSL +# Version du firmware 1.5.26 +# Mode de connection Dégroupé +# Temps depuis la mise en route 9 jours, 23 heures, 4 minutes +# +# +# Téléphone : +# =========== +# +# Etat Ok +# Etat du combiné Raccroché +# Sonnerie Inactive +# +# +# Adsl : +# ====== +# +# Etat Showtime +# Protocole ADSL2+ +# Mode Interleaved +# +# Descendant Montant +# -- -- +# Débit ATM 1913 kb/s 945 kb/s +# Marge de bruit 5.10 dB 5.60 dB +# Atténuation 50.00 dB 23.30 dB +# FEC 425249 15503719 +# CRC 5489 0 +# HEC 705 4296208 +# +# Journal de connexion adsl : +# --------------------------- +# +# Date Etat Débit (kb/s) +# -- -- -- +# 21/05/2019 à 08:35:49 Connexion 1913 / 988 +# 21/05/2019 à 08:34:38 Déconnexion +# 20/05/2019 à 21:39:30 Connexion 1156 / 988 +# 20/05/2019 à 21:38:43 Déconnexion +# 20/05/2019 à 14:50:52 Connexion 1925 / 985 +# 20/05/2019 à 14:50:05 Déconnexion +# 20/05/2019 à 07:14:38 Connexion 206 / 833 +# 20/05/2019 à 07:14:10 Déconnexion +# 20/05/2019 à 07:13:50 Connexion 983 / 587 +# 20/05/2019 à 07:12:49 Déconnexion +# 20/05/2019 à 07:09:32 Connexion 1113 / 864 +# 20/05/2019 à 07:08:45 Déconnexion +# 20/05/2019 à 07:07:22 Connexion 1195 / 826 +# 20/05/2019 à 07:06:59 Déconnexion +# 20/05/2019 à 07:06:39 Connexion 1832 / 923 +# 20/05/2019 à 07:05:51 Déconnexion +# 20/05/2019 à 06:58:10 Connexion 1238 / 887 +# 20/05/2019 à 06:57:23 Déconnexion +# 20/05/2019 à 06:56:46 Connexion 1375 / 935 +# 20/05/2019 à 06:55:58 Déconnexion +# 20/05/2019 à 06:55:32 Connexion 1353 / 904 +# 20/05/2019 à 06:54:45 Déconnexion +# 20/05/2019 à 06:50:37 Connexion 1380 / 923 +# 20/05/2019 à 06:49:32 Déconnexion +# +# +# Wifi : +# ====== +# +# Etat Ok +# Modèle Ralink RT2880 +# Canal 1 +# État du réseau Activé +# Ssid NetSSID +# Type de clé WPA (TKIP+AES) +# FreeWifi Désactivé +# FreeWifi Secure Actif +# +# +# Réseau : +# ======== +# +# Adresse 
MAC Freebox XX:XX:XX:XX:XX:XX +# Adresse IP 203.0.113.60 +# IPv6 Activé +# Mode routeur Activé +# Adresse IP privée 192.0.2.1 +# Adresse IP DMZ 192.0.2.2 +# Adresse IP Freeplayer 192.0.2.0 +# Réponse au ping Activé +# Proxy Wake On Lan Désactivé +# Serveur DHCP Activé +# Plage d'adresses dynamique 192.0.2.100 - 192.0.2.254 +# +# Attributions dhcp : +# ------------------- +# +# Adresse MAC Adresse IP +# -- -- +# XX:XX:XX:XX:XX:XX 192.0.2.101 +# XX:XX:XX:XX:XX:XX 192.0.2.102 +# XX:XX:XX:XX:XX:XX 192.0.2.103 +# XX:XX:XX:XX:XX:XX 192.0.2.104 +# XX:XX:XX:XX:XX:XX 192.0.2.105 +# XX:XX:XX:XX:XX:XX 192.0.2.100 +# XX:XX:XX:XX:XX:XX 192.0.2.3 +# +# Redirections de ports : +# ----------------------- +# +# Protocole Port source Destination Port destination +# -- -- -- -- +# TCP 22 192.0.2.3 22 +# TCP 2222 192.0.2.100 22 +# TCP 1194 192.0.2.3 1194 +# UDP 1194 192.0.2.3 1194 +# TCP 80 192.0.2.3 80 +# +# Interfaces réseau : +# ------------------- +# +# Lien Débit entrant Débit sortant +# -- -- -- +# WAN Ok 1 ko/s 1 ko/s +# Ethernet 0 ko/s 0 ko/s +# USB Non connecté +# Switch 100baseTX-FD 1 ko/s 1 ko/s + +fetch() { + # shellcheck disable=SC2154 + curl -s "$@" +} + +get_line_column() { + local key="$1" + local field_index="$2" + awk '/'"$key"'/ { + if ($('"$field_index"') ~ /^[.0-9]+$/) { + print $('"$field_index"') + } + }' +} + +get_data() { + INFO="$(fetch "${fbx_info_url}" | iconv -f latin1 -t utf8)" + + UPTIME_DAYS=$(get_line_column "Temps depuis la mise en route" "NF-5" <<< "${INFO}") + UPTIME_HOURS=$(get_line_column "Temps depuis la mise en route" "NF-3" <<< "${INFO}") + UPTIME_MINUTES=$(get_line_column "Temps depuis la mise en route" "NF-1" <<< "${INFO}") + + ATM_DOWN=$(get_line_column "ATM" "NF-3" <<< "${INFO}") + ATM_UP=$(get_line_column "ATM" "NF-1" <<< "${INFO}") + + NOISE_DOWN=$(get_line_column "bruit" "NF-3" <<< "${INFO}") + NOISE_UP=$(get_line_column "bruit" "NF-1" <<< "${INFO}") + + ATTENUATION_DOWN=$(get_line_column "Atténuation" "NF-3" <<< "${INFO}") + ATTENUATION_UP=$(get_line_column "Atténuation" "NF-1" <<< "${INFO}") + + FEC_DOWN=$(get_line_column "FEC" "NF-1" <<< "${INFO}") + FEC_UP=$(get_line_column "FEC" "NF" <<< "${INFO}") + CRC_DOWN=$(get_line_column "CRC" "NF-1" <<< "${INFO}") + CRC_UP=$(get_line_column "CRC" "NF" <<< "${INFO}") + HEC_DOWN=$(get_line_column "HEC" "NF-1" <<< "${INFO}") + HEC_UP=$(get_line_column "HEC" "NF" <<< "${INFO}") + + WAN_DOWN=$(get_line_column "WAN" "NF-3" <<< "${INFO}") + WAN_UP=$(get_line_column "WAN" "NF-1" <<< "${INFO}") + ETH_DOWN=$(get_line_column "Ethernet" "NF-3" <<< "${INFO}") + ETH_UP=$(get_line_column "Ethernet" "NF-1" <<< "${INFO}") + USB_DOWN=$(get_line_column "USB" "NF-3" <<< "${INFO}") + USB_UP=$(get_line_column "USB" "NF-1" <<< "${INFO}") + SWITCH_DOWN=$(get_line_column "Switch" "NF-3" <<< "${INFO}") + SWITCH_UP=$(get_line_column "Switch" "NF-1" <<< "${INFO}") + + DHCP_CLIENTS=$(sed -nE '/Attributions dhcp/,/Redirections de ports/{s/^\s*([0-9A-F:]{17}).*$/\1/p}' <<< "${INFO}" | wc -l) + REDIRECT_TCP=$(sed -nE '/Redirections de ports/,/Interfaces réseau/{s/^\s*(TCP).*$/\1/p}' <<< "${INFO}" | wc -l) + REDIRECT_UDP=$(sed -nE '/Redirections de ports/,/Interfaces réseau/{s/^\s*(UDP).*$/\1/p}' <<< "${INFO}" | wc -l) + + if [ -z "${UPTIME_DAYS:-}" ]; then + UPTIME_DAYS=0 + fi + if [ -z "${UPTIME_HOURS:-}" ]; then + UPTIME_HOURS=0 + fi + if [ -z "${UPTIME_MINUTES:-}" ]; then + UPTIME_MINUTES=0 + fi + FREEBOX_UPTIME=$((UPTIME_DAYS*86400+UPTIME_HOURS*3600+UPTIME_MINUTES*60)) + + LAST_CONNECT=$(sed -nE '/Journal de connexion 
adsl/,+5{s#^\s*([0-9]{2})/([0-9]{2})/([0-9]{4})[^0-9]*([0-9]+):([0-9]+):([0-9]+)\s+Connexion.*#\3-\2-\1T\4:\5:\6#p}' <<< "${INFO}") + CONNECTION_UPTIME=U + if [ -n "${LAST_CONNECT}" ]; then + CONNECTION_UPTIME="$(($(date +%s)-$(date -d "${LAST_CONNECT}" +%s)))" + + fi +} + +graph_config() { + graph="" + if [ -n "${1:-}" ]; then + graph=".$1" + fi + + echo "multigraph freebox${graph}" + + case "$graph" in + .adsl) + echo "graph_title ADSL characteristics" + echo 'graph_category network' + echo 'graph_vlabel dB in (-) / out (+)' + echo 'graph_order noise_down noise attenuation_down attenuation' + + echo 'noise_down.label Noise down' + echo 'noise_down.graph no' + echo 'noise.label Noise' + echo 'noise.negative noise_down' + + echo 'attenuation_down.label Attenuation down' + echo 'attenuation_down.graph no' + echo 'attenuation.label Attenuation' + echo 'attenuation.negative attenuation_down' + ;; + .adsl_errors) + echo 'graph_title ADSL error correction' + echo 'graph_category network' + echo 'graph_vlabel errors in (-) / out (+)' + echo 'graph_order fec_down fec crc_down crc hec_down hec' + + echo 'fec_down.label FEC down' + echo 'fec_down.type DERIVE' + echo 'fec_down.min 0' + echo 'fec_down.graph no' + echo 'fec.label FEC' + echo 'fec.type DERIVE' + echo 'fec.min 0' + echo 'fec.negative fec_down' + + echo 'crc_down.label CRC down' + echo 'crc_down.type DERIVE' + echo 'crc_down.min 0' + echo 'crc_down.graph no' + echo 'crc.label CRC' + echo 'crc.type DERIVE' + echo 'crc.min 0' + echo 'crc.negative crc_down' + + echo 'hec_down.label HEC down' + echo 'hec_down.type DERIVE' + echo 'hec_down.min 0' + echo 'hec_down.graph no' + echo 'hec.label HEC' + echo 'hec.type DERIVE' + echo 'hec.min 0' + echo 'hec.negative hec_down' + ;; + .traffic) + echo 'graph_title Traffic' + echo 'graph_category network' + echo 'graph_vlabel bits per second in (-) / out (+)' + echo 'graph_order atm_down atm wan_down wan eth_down eth usb_down usb switch_down switch' + + echo 'atm_down.label ATM down' + echo 'atm_down.graph no' + echo 'atm_down.cdef atm_down,1000,*' + echo 'atm.label ATM sync' + echo 'atm.negative atm_down' + echo 'atm.cdef atm,1000,*' + + echo 'wan_down.label WAN down' + echo 'wan_down.graph no' + echo 'wan_down.cdef wan_down,8000,*' + echo 'wan.label WAN' + echo 'wan.negative wan_down' + echo 'wan.cdef wan,8000,*' + + echo 'eth_down.label ETH down' + echo 'eth_down.graph no' + echo 'eth_down.cdef eth_down,8000,*' + echo 'eth.label Ethernet' + echo 'eth.negative eth_down' + echo 'eth.cdef eth,8000,*' + + echo 'usb_down.label USB down' + echo 'usb.label USB' + echo 'usb_down.graph no' + echo 'usb_down.cdef usb_down,8000,*' + echo 'usb.negative usb_down' + echo 'usb.cdef usb,8000,*' + + echo 'switch_down.label Switch down' + echo 'switch_down.graph no' + echo 'switch_down.cdef switch_down,8000,*' + echo 'switch.label Switch' + echo 'switch.cdef switch,8000,*' + echo 'switch.negative switch_down' + ;; + .uptime) + echo 'graph_title Uptimes' + echo 'graph_category network' + echo 'graph_vlabel days' + echo 'graph_args --logarithmic' + + echo 'freebox.label Freebox' + echo 'freebox.draw AREA' + echo 'freebox.cdef freebox,86400,/' + + echo 'connection.label Connection' + echo 'connection.cdef connection,86400,/' + ;; + .users) + echo 'graph_title Network users' + echo 'graph_category network' + echo 'graph_vlabel count' + + echo 'dhcp.label DHCP leases' + echo 'redirect_tcp.label TCP redirections' + echo 'redirect_udp.label UDP redirections' + ;; + *) + echo 'graph_title Uplink traffic' + echo 
'graph_category network' + echo 'graph_vlabel bits per second in (-) / out (+)' + echo 'graph_order main_atm_down main_atm main_wan_down main_wan' + + # XXX: summary data similar to (more detailed) traffic + echo 'main_wan_down.label WAN down' + echo 'main_wan_down.graph no' + echo 'main_wan_down.cdef main_wan_down,8000,*' + echo 'main_wan.label bps' + echo 'main_wan.negative main_wan_down' + echo 'main_wan.cdef main_wan,8000,*' + + echo 'main_atm_down.label ATM down' + echo 'main_atm_down.graph no' + echo 'main_atm_down.cdef main_atm_down,1000,*' + echo 'main_atm.label bps (max)' + echo 'main_atm.negative main_atm_down' + echo 'main_atm.cdef main_atm,1000,*' + ;; + + esac + echo +} + +graph_data() { + graph="" + if [ -n "${1:-}" ]; then + graph=".$1" + fi + + echo "multigraph freebox${graph}" + case "$graph" in + .adsl) + echo "noise.value ${NOISE_UP:-U}" + echo "noise_down.value ${NOISE_DOWN:-U}" + + echo "attenuation.value ${ATTENUATION_UP:-U}" + echo "attenuation_down.value ${ATTENUATION_DOWN:-U}" + ;; + .adsl_errors) + echo "fec.value ${FEC_UP:-U}" + echo "fec_down.value ${FEC_DOWN:-U}" + + echo "crc.value ${CRC_UP:-U}" + echo "crc_down.value ${CRC_DOWN:-U}" + + echo "hec.value ${HEC_UP:-U}" + echo "hec_down.value ${HEC_DOWN:-U}" + ;; + .traffic) + echo "atm.value ${ATM_UP:-U}" + echo "atm_down.value ${ATM_DOWN:-U}" + + echo "wan.value ${WAN_UP:-U}" + echo "wan_down.value ${WAN_DOWN:-U}" + + echo "eth.value ${ETH_UP:-U}" + echo "eth_down.value ${ETH_DOWN:-U}" + + echo "usb.value ${USB_UP:-U}" + echo "usb_down.value ${USB_DOWN:-U}" + + echo "switch.value ${SWITCH_UP:-U}" + echo "switch_down.value ${SWITCH_DOWN:-U}" + ;; + .uptime) + echo "freebox.value ${FREEBOX_UPTIME:-U}" + echo "connection.value ${CONNECTION_UPTIME:-U}" + ;; + .users) + echo "dhcp.value ${DHCP_CLIENTS:-U}" + echo "redirect_tcp.value ${REDIRECT_TCP:-U}" + echo "redirect_udp.value ${REDIRECT_UDP:-U}" + ;; + *) + echo "main_wan.value ${WAN_UP:-U}" + echo "main_wan_down.value ${WAN_DOWN:-U}" + + echo "main_atm.value ${ATM_UP:-U}" + echo "main_atm_down.value ${ATM_DOWN:-U}" + + esac + echo +} + +main() { + case ${1:-} in + autoconf) + for CMD in curl iconv; do + if ! command -v "${CMD}" >/dev/null; then + echo "no (${CMD} not found)" + fi + done + + if curl --connect-timeout 1 -qso /dev/null "${fbx_info_url}"; then + echo 'yes' + else + echo "no (failed to retrieve ${fbx_info_url})" + fi + ;; + config) + graph_config + graph_config adsl + graph_config adsl_errors + graph_config traffic + graph_config uptime + graph_config users + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then + main + fi + ;; + *) + get_data + graph_data + graph_data adsl + graph_data adsl_errors + graph_data traffic + graph_data uptime + graph_data users + ;; + esac +} + +main "${1:-}" diff --git a/plugins/router/freeboxuptime b/plugins/router/freeboxuptime index c7dcb654..7626c700 100755 --- a/plugins/router/freeboxuptime +++ b/plugins/router/freeboxuptime @@ -22,7 +22,7 @@ # Running: HP embedded # OS details: HP Onboard Administrator management console # Uptime: 7.226 days (since Thu Dec 9 21:01:44 2010) -# +# # OS detection performed. Please report any incorrect results at http://nmap.org/submit/ . 
# Nmap done: 1 IP address (1 host up) scanned in 29.279 seconds # ---------------------------------------------------------------------------------------------------- @@ -30,7 +30,7 @@ # by using nmap on a specific tcp port, the detection is pretty fast (2-5 seconds) # with this command: nmap -O --osscan-guess -p80 remote_host # -# if you dont want nmap to query your freebox each time, set CACHE_HOURS=n +# if you don't want nmap to query your freebox each time, set CACHE_HOURS=n # to keep the uptime in cache for n hours # if not, set CACHE_HOURS=0 # @@ -65,16 +65,14 @@ fi if [ "$1" = "autoconf" ]; then if [ -z "$NMAP" ]; then echo "no (nmap not installed)" - exit 1 else if [ $PING -eq 0 ]; then echo "no (Freebox not reachable)" - exit 2 else - echo yes - exit 0 + echo yes fi fi + exit 0 fi @@ -87,7 +85,7 @@ if [ "$1" = "config" ]; then graph_info="Shows the uptime of your freebox (cache: ${CACHE_HOURS}h" if [ -f $CACHE_FILE ]; then lastCheck=$(stat -c %z $CACHE_FILE | cut -d"." -f1) - lastReboot=$(awk -F"@" '{print $2}' $CACHE_FILE) + lastReboot=$(awk -F"@" '{print $2}' $CACHE_FILE) graph_info="${graph_info} - last check: ${lastCheck} - last reboot: $lastReboot" else graph_info="${graph_info})" diff --git a/plugins/router/snmp__cisco_sbs_cpu b/plugins/router/snmp__cisco_sbs_cpu old mode 100644 new mode 100755 diff --git a/plugins/router/snmp__juniper b/plugins/router/snmp__juniper index 216283ba..e6568461 100755 --- a/plugins/router/snmp__juniper +++ b/plugins/router/snmp__juniper @@ -126,7 +126,7 @@ class JunOSSnmpClient(object): return int(varBindTable[0][1]) - + def get_data(self): devs = self.get_devices() @@ -141,19 +141,19 @@ class JunOSSnmpClient(object): devices = self.get_devices() data_def = [ - ('temp', self.hostname, 'System temperature', '--base 1000', 'System temperature in C', 'system'), - ('cpu', self.hostname, 'CPU usage', '--base 1000 -l 0 --upper-limit 100', 'CPU usage in %', 'system'), - ('buffer', self.hostname, 'Buffer usage', '--base 1000 -l 0 --upper-limit 100', 'Buffer usage in %', 'system'), + ('temp', self.hostname, 'System temperature', '--base 1000', 'System temperature in C'), + ('cpu', self.hostname, 'CPU usage', '--base 1000 -l 0 --upper-limit 100', 'CPU usage in %'), + ('buffer', self.hostname, 'Buffer usage', '--base 1000 -l 0 --upper-limit 100', 'Buffer usage in %'), ] - for datarow, hostname, title, args, vlabel, category in data_def: + for datarow, hostname, title, args, vlabel in data_def: print """multigraph juniper_{datarow} host_name {hostname} graph_title {title} graph_vlabel {vlabel} graph_args {args} -graph_category {category} -graph_info {title}""".format(datarow=datarow, hostname=hostname, title=title, args=args, vlabel=vlabel, category=category) +graph_category fw +graph_info {title}""".format(datarow=datarow, hostname=hostname, title=title, args=args, vlabel=vlabel) for suffix, node in devices.iteritems(): ident = "%s_%s" % (datarow, node) @@ -179,12 +179,8 @@ else: if not (host and port and community): print "# Bad configuration. Cannot run with Host=%s, port=%s and community=%s" % (host, port, community) sys.exit(1) - + if "config" in sys.argv[1:]: c.print_config() else: c.execute() - -# for Munin Plugin Gallery -# graph_category network - diff --git a/plugins/router/snmp__juniper_spu b/plugins/router/snmp__juniper_spu index 920f0b83..5ceab231 100755 --- a/plugins/router/snmp__juniper_spu +++ b/plugins/router/snmp__juniper_spu @@ -222,7 +222,7 @@ else: if not (host and port and community): print "# Bad configuration. 
Cannot run with Host=%s, port=%s and community=%s" % (host, port, community) sys.exit(1) - + if "config" in sys.argv[1:]: c.print_config() else: diff --git a/plugins/router/snmp__linksys_poe b/plugins/router/snmp__linksys_poe index c3fa6b88..72d6e0bb 100755 --- a/plugins/router/snmp__linksys_poe +++ b/plugins/router/snmp__linksys_poe @@ -1,4 +1,4 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby " =head1 NAME @@ -33,7 +33,7 @@ PoE ports. Information is gathered from Linksys' private MIB space, so it's probably only applicable to Linksys devices. I have been unable to get an actual copy of -the appropriate MIB, so I don't know the actual names of the values I'm +the appropriate MIB, so I don't know the actual names of the values I'm retrieving. =head1 MAGIC MARKERS @@ -65,48 +65,43 @@ rights to this plugin are waived. Do with it as you wish. require 'snmp' -idx_oid = "enterprises.3955.89.108.1.1.2" -max_oid = "enterprises.3955.89.108.1.1.6" -cur_oid = "enterprises.3955.89.108.1.1.5" +idx_oid = 'enterprises.3955.89.108.1.1.2' +max_oid = 'enterprises.3955.89.108.1.1.6' +cur_oid = 'enterprises.3955.89.108.1.1.5' -community = ENV['community'] || "public" +community = ENV['community'] || 'public' version = ENV['version'] == '1' ? :SNMPv1 : :SNMPv2c case ARGV[0] -when "autoconf" - puts "no" - exit 1 -when "snmpconf" - puts "require 1.3.6.1.4.1.3955.89.108.1.1.2.1. [0-9]" - puts "require 1.3.6.1.4.1.3955.89.108.1.1.5.1. [0-9]" - puts "require 1.3.6.1.4.1.3955.89.108.1.1.6.1. [0-9]" - exit 0; -when "config" +when 'snmpconf' + puts 'require 1.3.6.1.4.1.3955.89.108.1.1.2.1. [0-9]' + puts 'require 1.3.6.1.4.1.3955.89.108.1.1.5.1. [0-9]' + puts 'require 1.3.6.1.4.1.3955.89.108.1.1.6.1. [0-9]' + exit 0 +when 'config' host = $0.match('^(?:|.*\/)snmp_([^_]+)')[1] puts "host_name #{host}" - puts "graph_title PoE Power Usage" - puts "graph_vlabel Watts" - puts "graph_category sensors" + puts 'graph_title PoE Power Usage' + puts 'graph_vlabel Watts' + puts 'graph_category sensors' max_current = 0 - SNMP::Manager.open(:Host => host, - :Community => community, - :Version => version) do |manager| + SNMP::Manager.open(Host: host, + Community: community, + Version: version) do |manager| manager.walk([idx_oid, max_oid]) do |row| puts "iface_#{row[0].value}.label Port #{row[0].value}" puts "iface_#{row[0].value}.cdef iface_#{row[0].value},1000,/" puts "iface_#{row[0].value}.line #{row[1].value.to_f / 1000}" - if row[1].value > max_current - max_current = row[1].value - end + max_current = row[1].value if row[1].value > max_current end end puts "graph_args --upper-limit #{max_current.to_f / 1000}" exit 0 else host = $0.match('^(?:|.*\/)snmp_([^_]+)')[1] - SNMP::Manager.open(:Host => host, - :Community => community, - :Version => version) do |manager| + SNMP::Manager.open(Host: host, + Community: community, + Version: version) do |manager| manager.walk([idx_oid, cur_oid]) do |row| puts "iface_#{row[0].value}.value #{row[1].value}" end diff --git a/plugins/router/snmp__mikrotik b/plugins/router/snmp__mikrotik new file mode 100755 index 00000000..2cf1880b --- /dev/null +++ b/plugins/router/snmp__mikrotik @@ -0,0 +1,159 @@ +#!/usr/bin/perl -w + +=head1 NAME + +snmp__mikrotik - monitor Mikrotik routers via SNMP + +=head1 APPLICABLE SYSTEMS + +Mikrotik Routers + +=head1 CONFIGURATION + +As a rule SNMP plugins need site specific configuration. The default +configuration (shown here) will only work on insecure sites/devices. 
+ [snmp_*] + env.version 2 + env.community public + +=head1 MAGIC MARKERS + +#%# family=snmpauto +#%# capabilities=snmpconf + +=head1 BUGS + +None known. + +=head1 AUTHOR + +Copyright (C) 2020 Alejandro Suarez (teconecta.es) +Based on snmp__if_ plugin. + +=head1 LICENSE + +GPLv2. + +=cut + +use strict; +use Net::SNMP; + +my $DEBUG = 0; + +my $host = $ENV{host} || undef; +my $port = $ENV{port} || 161; +my $community = $ENV{community} || "public"; + +my $sysFlashUsageOID = "1.3.6.1.2.1.25.2.3.1.6.131072"; +my $sysFlashTotalOID = "1.3.6.1.2.1.25.2.3.1.5.131072"; +my $sysRAMUsageOID = "1.3.6.1.2.1.25.2.3.1.6.65536"; +my $sysRAMTotalOID = "1.3.6.1.2.1.25.2.3.1.5.65536"; +my $sysTempOID = "1.3.6.1.4.1.14988.1.1.3.10.0"; + +my $response; + +if (defined $ARGV[0] and $ARGV[0] eq "snmpconf") { + print "require $sysFlashUsageOID ^\\d\n"; + print "require $sysFlashTotalOID ^\\d\n"; + print "require $sysRAMUsageOID ^\\d\n"; + print "require $sysRAMTotalOID ^\\d\n"; + exit 0; +} + +if ($0 =~ /^(?:|.*\/)snmp_([^_]+)_mikrotik$/) { + $host = $1; + if ($host =~ /^([^:]+):(\d+)$/) { + $host = $1; + $port = $2; + } +} elsif (!defined($host)) { + print "# Debug: $0 -- $1\n" if $DEBUG; + die "# Error: couldn't understand what I'm supposed to monitor."; +} + +my ($session, $error) = Net::SNMP->session( + -hostname => $host, + -community => $community, + -port => $port + ); + + +die "Croaking: $error" unless (defined ($session)); + + +if ($ARGV[0] and $ARGV[0] eq "config") { + print "host_name $host\n"; + $response = $session->get_request($sysFlashTotalOID); + if (defined $response) { + print "multigraph flash\n"; + print "graph_args --base 1024 -l 0 --vertical-label Bytes --upper-limit " . ($response->{$sysFlashTotalOID} * 1024) . "\n"; + print "graph_title Flash disk usage\n"; + print "graph_category system\n"; + print "graph_info This graph shows the router's flash disk usage.\n"; + print "graph_order Total Used\n"; + print "graph_vlabel bytes\n"; + print "sysFlashTotal.label Total Memory\n"; + print "sysFlashTotal.draw AREA\n"; + print "sysFlashUsage.label Used Memory\n"; + print "sysFlashUsage.draw AREA\n"; + } + + $response = $session->get_request($sysRAMTotalOID); + if (defined $response) { + print "multigraph ram\n"; + print "graph_args --base 1024 -l 0 --vertical-label Bytes --upper-limit " . ($response->{$sysRAMTotalOID} * 1024) . 
"\n"; + print "graph_title RAM usage\n"; + print "graph_category system\n"; + print "graph_info This graph shows the router's memory usage.\n"; + print "graph_order Total Used\n"; + print "graph_vlabel bytes\n"; + print "sysRAMTotal.label Total Memory\n"; + print "sysRAMTotal.draw AREA\n"; + print "sysRAMUsage.label Used Memory\n"; + print "sysRAMUsage.draw AREA\n"; + } + + $response = $session->get_request($sysTempOID); + if (defined $response) { + print "multigraph temp\n"; + print "graph_vlabel degC \n"; + print "graph_title Temperature\n"; + print "graph_category sensors\n"; + print "graph_info This graph shows the router's temperature.\n"; + print "sysTemp.label Temperature\n"; + print "sysTemp.type GAUGE\n"; + } + exit 0 unless (($ENV{MUNIN_CAP_DIRTYCONFIG} || 0) == 1); +} + + +print "multigraph flash\n"; +$response = $session->get_request(-varbindlist => [$sysFlashUsageOID, $sysFlashTotalOID]); +if (defined $response) { + print "sysFlashUsage.value ", $response->{$sysFlashUsageOID}*1024, "\n"; + print "sysFlashTotal.value ", $response->{$sysFlashTotalOID}*1024, "\n"; +} else { + print "sysFlashUsage.value U\n"; + print "sysFlashTotal.value U\n"; +} + +print "multigraph ram\n"; +$response = $session->get_request(-varbindlist => [$sysRAMUsageOID, $sysRAMTotalOID]); +if (defined $response) { + print "sysRAMUsage.value ", $response->{$sysRAMUsageOID}*1024, "\n"; + print "sysRAMTotal.value ", $response->{$sysRAMTotalOID}*1024, "\n"; +} else { + print "sysRAMUsage.value U\n"; + print "sysRAMTotal.value U\n"; +} + +print "multigraph temp\n"; +$response = $session->get_request(-varbindlist => [$sysTempOID]); +if (defined $response) { + print "sysTemp.value ", $response->{$sysTempOID}/10, "\n"; +} else { + print "sysTemp.value U\n"; +} + +# vim:syntax=perl diff --git a/plugins/router/speedport_300 b/plugins/router/speedport_300 index 4b5cd531..7f780b1d 100755 --- a/plugins/router/speedport_300 +++ b/plugins/router/speedport_300 @@ -1,8 +1,8 @@ #!/bin/bash # # -# Munin plugin to show the up- / download stream of the actual -# internet connection by reading the top_status.htm from the +# Munin plugin to show the up- / download stream of the actual +# internet connection by reading the top_status.htm from the # Speedport 300 # # diff --git a/plugins/router/technicolor_tc8715d b/plugins/router/technicolor_tc8715d new file mode 100755 index 00000000..c2473d9f --- /dev/null +++ b/plugins/router/technicolor_tc8715d @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 + +""" +=pod + +=encoding utf8 + +=head1 NAME + +technicolor_tc8715d - Munin plugin for graphing statistics from +Technicolor TC8715D cable modems. + +=head1 DESCRIPTION + +The following values are graphed: + +=over + +=item * + +upstream and downstream power levels + +=item * + +downstream signal to noise ratio + +=item * + +downstream signal statistics (codeword counts) + +=back + +=head1 USAGE + +Install as you would with any Munin plugin. No configuration is +needed. Requires the multigraph and dirtyconfig capabilities available +in munin 2.0 and newer. + +=head1 NOTES + +Developed and tested with Python 3.7.3, Technicolor TC8715D cable +modem hardware revision 1.1, software image name +TC8715D-01.EF.04.38.00-180405-S-FF9-D.img, advanced services +2.6.30-1.0.11mp1-g24a0ad5-dirty. + +=head1 DEVELOPMENT + +The latest version of this plugin can be found in the munin contrib +repository at https://github.com/munin-monitoring/contrib. Issues +with this plugin may be reported there. 
Patches accepted through the +normal github process of forking the repository and submitting a +pull request with your commits. + +=head1 AUTHOR + +Copyright © 2019 Kenyon Ralph + +=head1 LICENSE + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as +published by the Free Software Foundation, either version 3 of the +License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . + +=cut + +""" + +import html.parser +import urllib.request +import sys + + +class TechnicolorHTMLParser(html.parser.HTMLParser): + def __init__(self): + html.parser.HTMLParser.__init__(self) + self.signaldatapage = list() + + # Number of downstream channels. + self.downstream_channels = 0 + + # Number of upstream channels. + self.upstream_channels = 0 + + self.downstream_SNRs = list() + self.downstream_powers = list() + self.upstream_powers = list() + self.unerrored_codewords = list() + self.correctable_codewords = list() + self.uncorrectable_codewords = list() + + def handle_data(self, data): + data = data.strip() + if data != "": + self.signaldatapage.append(data) + + def process(self): + # Delete the junk before the statistics start. This list + # should start with 'Downstream' after this deletion. + del self.signaldatapage[0:119] + + index_positions = [i for i, x in enumerate(self.signaldatapage) if x == "Index"] + lock_status_positions = [ + i for i, x in enumerate(self.signaldatapage) if x == "Lock Status" + ] + self.downstream_channels = lock_status_positions[0] - index_positions[0] - 1 + self.upstream_channels = lock_status_positions[1] - index_positions[1] - 1 + + self.downstream_SNRs = self.signaldatapage[ + 6 + 3 * self.downstream_channels : 22 + 3 * self.downstream_channels + ] + self.downstream_SNRs = [x.split()[0] for x in self.downstream_SNRs] + + self.downstream_powers = self.signaldatapage[ + 7 + 4 * self.downstream_channels : 23 + 4 * self.downstream_channels + ] + self.downstream_powers = [x.split()[0] for x in self.downstream_powers] + + self.upstream_powers = self.signaldatapage[ + 15 + + 6 * self.downstream_channels + + 4 * self.upstream_channels : 15 + + 6 * self.downstream_channels + + 5 * self.upstream_channels + ] + self.upstream_powers = [x.split()[0] for x in self.upstream_powers] + + self.unerrored_codewords = self.signaldatapage[ + 19 + + 6 * self.downstream_channels + + 7 * self.upstream_channels : 19 + + 7 * self.downstream_channels + + 7 * self.upstream_channels + ] + + self.correctable_codewords = self.signaldatapage[ + 20 + + 7 * self.downstream_channels + + 7 * self.upstream_channels : 20 + + 8 * self.downstream_channels + + 7 * self.upstream_channels + ] + + self.uncorrectable_codewords = self.signaldatapage[ + 21 + + 8 * self.downstream_channels + + 7 * self.upstream_channels : 21 + + 9 * self.downstream_channels + + 7 * self.upstream_channels + ] + + +if len(sys.argv) != 2 or sys.argv[1] != "config": + print( + "Error: plugin designed for the dirtyconfig protocol, must be run with the config argument" + ) + sys.exit(1) + +parser = TechnicolorHTMLParser() + +for line in urllib.request.urlopen("http://192.168.100.1/vendor_network.asp"): + 
parser.feed(line.decode()) + +parser.process() + +print( + """multigraph technicolor_tc8715d_power +graph_title Technicolor TC8715D Cable Modem Power +graph_vlabel Signal Strength (dBmV) +graph_info This graph shows the channel power values reported by a Technicolor TC8715D cable modem. +graph_category network""" +) + +for i in range(parser.downstream_channels): + print( + f"""ds_power_{i+1}.label Channel {i+1} Downstream Power +ds_power_{i+1}.type GAUGE +ds_power_{i+1}.value {parser.downstream_powers[i]}""" + ) + +for i in range(parser.upstream_channels): + print( + f"""us_power_{i+1}.label Channel {i+1} Upstream Power +us_power_{i+1}.type GAUGE +us_power_{i+1}.value {parser.upstream_powers[i]}""" + ) + +print( + """multigraph technicolor_tc8715d_snr +graph_title Technicolor TC8715D Cable Modem SNR +graph_vlabel Signal-to-Noise Ratio (dB) +graph_info Downstream signal-to-noise ratio reported by a Technicolor TC8715D cable modem. +graph_category network""" +) + +for i in range(parser.downstream_channels): + print( + f"""snr_{i+1}.label Channel {i+1} SNR +snr_{i+1}.type GAUGE +snr_{i+1}.value {parser.downstream_SNRs[i]}""" + ) + +print( + """multigraph technicolor_tc8715d_codewords +graph_title Technicolor TC8715D Cable Modem Codewords +graph_vlabel Codewords/${graph_period} +graph_info Downstream codeword rates reported by a Technicolor TC8715D cable modem. +graph_category network""" +) + +for i in range(parser.downstream_channels): + print( + f"""unerr_{i+1}.label Channel {i+1} Unerrored Codewords +unerr_{i+1}.type DERIVE +unerr_{i+1}.min 0 +unerr_{i+1}.value {parser.unerrored_codewords[i]} +corr_{i+1}.label Channel {i+1} Correctable Codewords +corr_{i+1}.type DERIVE +corr_{i+1}.min 0 +corr_{i+1}.value {parser.correctable_codewords[i]} +uncorr_{i+1}.label Channel {i+1} Uncorrectable Codewords +uncorr_{i+1}.type DERIVE +uncorr_{i+1}.min 0 +uncorr_{i+1}.value {parser.uncorrectable_codewords[i]}""" + ) diff --git a/plugins/router/tg585v7__ b/plugins/router/tg585v7__ index 1f5b9e10..20d4a838 100755 --- a/plugins/router/tg585v7__ +++ b/plugins/router/tg585v7__ @@ -13,7 +13,7 @@ Requires perl and either WWW::Mechanize or Net::Telnet. =head1 CONFIGURATION The plugin needs HTML access to the router. If you can get to http://YOUR_ROUTER/, -and are greeting with a page titled "THOMSON TG585 v7", then you can probably use this plugin. +and are greeting with a page titled "THOMSON TG585 v7", then you can probably use this plugin. This is a wildcard plugin, so you will need to create symlinks to this plugin (or create copies if your filesystem doesn't support linking). Links should be of the form: @@ -112,9 +112,9 @@ print "# Access Mode is: $ACCESS_MODE\n" if $MUNIN_DEBUG; if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { if ($ret) { print "no ($ret)\n"; - exit 1; + } else { + print "yes\n"; } - print "yes\n"; exit 0; } @@ -149,7 +149,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "config" ) { if ( $mode eq 'bandwidth' ) { print < + + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + + +=cut + + +set -eu + + +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" + + +LOG_FILE=${rsnapshot_log_file:-/var/log/rsnapshot.log} +RSNAPSHOT_OPERATION_NAME=${rsnapshot_operation_name:-beta} +RSNAPSHOT_DESCRIPTION=${rsnapshot_description:-Backup Duration} + + +# retrieve the latest set of log files for a complete backup operation +get_latest_process_log() { + tac "$LOG_FILE" \ + | awk ' + BEGIN { in_process = 0; } + / '"$RSNAPSHOT_OPERATION_NAME"': completed[, ]/ { in_process = 1; } + / '"$RSNAPSHOT_OPERATION_NAME"': started$/ { if (in_process == 1) exit; } + { if (in_process == 1) print($0); }' \ + | tac +} + + +# parse rsnapshot log lines +# output format: +# BACKUP_NAME DURATION_SECONDS [ERROR_MESSAGES] +get_backups_with_duration() { + get_latest_process_log | while read -r timestamp command details; do + parsed_timestamp=$(date +%s --date "$(printf '%s' "$timestamp" | tr -d '[]')") + if [ -n "${backup_name:-}" ] && printf '%s' "$command" | grep -q "/rsnapshot$" && printf '%s' "$details" | grep -q "ERROR:"; then + backup_errors=$(printf '%s' "$details" | sed 's/^.*ERROR: //') + elif printf '%s' "$command" | grep -q "/rsync$"; then + if [ -n "${backup_name:-}" ]; then + printf '%s\t%d\t%s\n' "$backup_name" "$((parsed_timestamp - backup_start))" "${backup_errors:-}" + fi + backup_name=$(printf '%s' "$details" | sed 's#/$##' | sed 's#^.*/##') + backup_start=$parsed_timestamp + backup_errors= + elif printf '%s' "$command" | grep -q "/rm$"; then + # the backup is finished + if [ -n "${backup_name:-}" ]; then + printf '%s\t%d\t%s\n' "$backup_name" "$((parsed_timestamp - backup_start))" "${backup_errors:-}" + fi + break + fi + done | sort +} + + +do_autoconf() { + if [ -e "$LOG_FILE" ]; then + if [ -r "$LOG_FILE" ]; then + if command -v "tac" >/dev/null; then + echo "yes" + else + echo "no (executable 'tac' (coreutils) is missing)" + fi + else + echo "no (rsnapshot log file is not readable: $LOG_FILE)" + fi + else + echo "no (rsnapshot log file missing: $LOG_FILE)" + fi + exit 0 +} + + +get_backup_fieldname() { + local backup_name="$1" + clean_fieldname "backup_${backup_name}" +} + + +print_backup_details() { + local backup_name="$1" + local backup_duration="$2" + local backup_messages="$3" + local fieldname + fieldname=$(get_backup_fieldname "$backup_name") + printf '%s.value %s\n' "$fieldname" "$backup_duration" + if [ -n "$backup_messages" ]; then + printf '%s.extinfo %s\n' "$fieldname" "$backup_messages" + fi +} + + +do_config() { + local do_emit_values="${1:-0}" + echo "graph_title rsnapshot - $RSNAPSHOT_DESCRIPTION" + echo 'graph_vlabel Duration of backup in minutes' + echo 'graph_category backup' + echo 'graph_scale no' + get_backups_with_duration | while read -r name duration messages; do + fieldname=$(clean_fieldname "backup_$name") + printf '%s.label %s\n' "$fieldname" "$name" + printf '%s.draw %s\n' "$fieldname" "AREASTACK" + # The duration is stored as an SI unit (seconds). + # The visualization as the number of minutes should be suitable for most backups. 
+ printf '%s.cdef %s,60,/\n' "$fieldname" "$fieldname" + if [ "$do_emit_values" = "1" ]; then + print_backup_details "$name" "$duration" "$messages" + fi + done +} + + +do_fetch() { + get_backups_with_duration | while read -r name duration messages; do + print_backup_details "$name" "$duration" "$messages" + done +} + + +case ${1:-} in + autoconf) + do_autoconf + ;; + config) + do_config "${MUNIN_CAP_DIRTYCONFIG:-0}" + ;; + "") + do_fetch + ;; + *) + echo >&2 "Unknown command: $1" + exit 1 + ;; +esac diff --git a/plugins/rsync/rsyncd_bytes b/plugins/rsync/rsyncd_bytes index d290282b..750e6abb 100755 --- a/plugins/rsync/rsyncd_bytes +++ b/plugins/rsync/rsyncd_bytes @@ -12,7 +12,7 @@ mktempfile () { mktemp -t $1 -} +} RSYNCD_LOG=${logfile:-/var/log/rsyncd.log} LOGTAIL=${logtail:-`which logtail`} @@ -21,11 +21,10 @@ STATEFILE=$MUNIN_PLUGSTATE/rsync-bytes.offset if [ "$1" = "autoconf" ]; then if [ -f "${RSYNCD_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/rsync/rsyncd_count b/plugins/rsync/rsyncd_count index 9b3a8afd..ebb10764 100755 --- a/plugins/rsync/rsyncd_count +++ b/plugins/rsync/rsyncd_count @@ -12,7 +12,7 @@ mktempfile () { mktemp -t $1 -} +} RSYNCD_LOG=${logfile:-/var/log/rsyncd.log} LOGTAIL=${logtail:-`which logtail`} @@ -21,11 +21,10 @@ STATEFILE=$MUNIN_PLUGSTATE/rsync-count.offset if [ "$1" = "autoconf" ]; then if [ -f "${RSYNCD_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/rtorrent/rtom_allsessions_mem b/plugins/rtorrent/rtom_allsessions_mem index 1b12be09..fc203d5c 100755 --- a/plugins/rtorrent/rtom_allsessions_mem +++ b/plugins/rtorrent/rtom_allsessions_mem @@ -44,21 +44,17 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { - my $category = $ENV{"category"} || ""; - print "graph_title rTorrent memory usage\n"; - print "graph_args --base 1024 --lower-limit 0\n"; - print "graph_vlabel Bytes\n"; - print "graph_category filetransfer".${category}."\n"; - print "mem.label Memory usage\n"; - print "mem.info Memory usage of rTorrent\n"; - print "mem.type GAUGE\n"; - print "mem.draw LINE2\n"; - exit 0; + my $category = $ENV{"category"} || ""; + print "graph_title rTorrent memory usage\n"; + print "graph_args --base 1024 --lower-limit 0\n"; + print "graph_vlabel Bytes\n"; + print "graph_category filetransfer".${category}."\n"; + print "mem.label Memory usage\n"; + print "mem.info Memory usage of rTorrent\n"; + print "mem.type GAUGE\n"; + print "mem.draw LINE2\n"; + exit 0; } use IO::Socket; @@ -67,43 +63,94 @@ my @sockets = split /,/, $ENV{"socket"} || ""; my $ip = $ENV{"ip"} || "127.0.0.1"; my @ports = split /,/, $ENV{"port"} || ""; -my $mem = 0; +# detect rtorrent version +use version; +my %rtorrent_version; +sub get_rtorrent_version { + my $version; + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $version = $1; + } + } + close (SOCK); + $rtorrent_version{$_[0]} = $version; +} +sub rtorrent_version_lower_than { + if (keys %rtorrent_version == 0 && not defined $_[0]){ + if ( ( 
defined $src ) && ( $src eq "socket" ) ) { + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + get_rtorrent_version $socket; + close (SOCK); + } + } else { + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + get_rtorrent_version $port; + close (SOCK); + } + } + } + if(defined $_[1]){ + return version->parse($rtorrent_version{$_[0]}) < version->parse($_[1]); + } +} +# init rtorrent_version +rtorrent_version_lower_than(); + my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; -my $line = "get_memory_usage"; -my $llen = length $line; -my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; -my $hlen = length $header; +my $mem = 0; +sub construct_line { + my $function = rtorrent_version_lower_than($_[0], '0.9.0') ? 'get_memory_usage' : 'pieces.memory.current'; + my $line = "$function"; + my $llen = length $line; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line = "${hlen}:${header},${line}"; + return $line; +} if ( ( defined $src ) && ( $src eq "socket" ) ) { - for $socket (@sockets) - { - socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; - connect( SOCK, sockaddr_un( $socket ) ) or die $!; - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$pattern/ ) { - $mem = $mem + $2; - } - } - close (SOCK); - } + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + print SOCK construct_line($socket); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $mem = $mem + $2; + } + } + close (SOCK); + } } else { - for $port (@ports) - { - socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); - connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$pattern/ ) { - $mem = $mem + $2; - } - } - close (SOCK); - } + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + print SOCK construct_line($port); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $mem = $mem + $2; + } + } + close (SOCK); + } } print "mem.value ${mem}\n"; diff --git a/plugins/rtorrent/rtom_allsessions_peers b/plugins/rtorrent/rtom_allsessions_peers index a3cc1705..5526668c 100755 --- a/plugins/rtorrent/rtom_allsessions_peers +++ b/plugins/rtorrent/rtom_allsessions_peers @@ -17,14 +17,14 @@ # # Parameters: # -# config required +# config required # # # Configurable variables # -# src "socket" when using scgi_socket, or anything else when using scgi_port -# socket rTorrent's rpc socket (scgi_local) - using scgi_local - needed, when "src" is set to "socket" -# category Change graph category +# src "socket" when using scgi_socket, or anything else when using scgi_port +# socket rTorrent's rpc socket (scgi_local) - using scgi_local - needed, when "src" is set to "socket" +# category Change graph category # # Configuration example # @@ -42,32 +42,28 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { - my $category = $ENV{"category"} || ""; - print "graph_title rTorrent peer statistics\n"; - print "graph_args --base 1000 --lower-limit 0\n"; - 
print "graph_vlabel peers\n"; - print "graph_category filetransfer".${category}."\n"; - print "outgoing.label outgoing\n"; - print "outgoing.draw AREA\n"; - print "outgoing.info number of outgoing connections\n"; - print "incoming.label incoming\n"; - print "incoming.draw STACK\n"; - print "incoming.info number of incoming connections\n"; - print "plain.label plain text\n"; - print "plain.draw LINE2\n"; - print "plain.info number of plain text connections\n"; - print "encrypted.label encrypted\n"; - print "encrypted.draw LINE2\n"; - print "encrypted.info number of encrypted connections\n"; - print "total.label total\n"; - print "total.draw LINE2\n"; - print "total.info total number of connections\n"; - exit 0; + my $category = $ENV{"category"} || ""; + print "graph_title rTorrent peer statistics\n"; + print "graph_args --base 1000 --lower-limit 0\n"; + print "graph_vlabel peers\n"; + print "graph_category filetransfer".${category}."\n"; + print "outgoing.label outgoing\n"; + print "outgoing.draw AREA\n"; + print "outgoing.info number of outgoing connections\n"; + print "incoming.label incoming\n"; + print "incoming.draw STACK\n"; + print "incoming.info number of incoming connections\n"; + print "plain.label plain text\n"; + print "plain.draw LINE2\n"; + print "plain.info number of plain text connections\n"; + print "encrypted.label encrypted\n"; + print "encrypted.draw LINE2\n"; + print "encrypted.info number of encrypted connections\n"; + print "total.label total\n"; + print "total.draw LINE2\n"; + print "total.info total number of connections\n"; + exit 0; } use IO::Socket; @@ -77,13 +73,67 @@ my @sockets = split /,/, $ENV{"socket"} || ""; my $ip = $ENV{"ip"} || "127.0.0.1"; my @ports = split /,/, $ENV{"port"} || ""; -my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; -my $tpattern = qr/[0-9A-F]{20}/; +# detect rtorrent version +use version; +my %rtorrent_version; +sub get_rtorrent_version { + my $version; + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $version = $1; + } + } + close (SOCK); + $rtorrent_version{$_[0]} = $version; +} +sub rtorrent_version_lower_than { + if (keys %rtorrent_version == 0 && not defined $_[0]){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + get_rtorrent_version $socket; + close (SOCK); + } + } else { + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + get_rtorrent_version $port; + close (SOCK); + } + } + } + if(defined $_[1]){ + return version->parse($rtorrent_version{$_[0]}) < version->parse($_[1]); + } +} +# init rtorrent_version +rtorrent_version_lower_than(); -my $line = "d.multicallmaind.get_hash=p.multicall=,p.is_encrypted=,p.is_incoming="; -my $llen = length $line; -my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; -my $hlen = length $header; +my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; +my $tpattern = qr/[0-9A-F]{20}/; +sub construct_line { + my $function_multicall = rtorrent_version_lower_than($_[0], '0.9.0') ? 
'd.multicall' : 'd.multicall2'; + my $function_multicall_arg = rtorrent_version_lower_than($_[0], '0.9.0') ? '' : ''; + my $function_hash = rtorrent_version_lower_than($_[0], '0.9.0') ? 'd.get_hash=' : 'd.hash='; + my $line = "$function_multicall$function_multicall_argmain$function_hashp.multicall=,p.is_encrypted=,p.is_incoming="; + my $llen = length $line; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line = "${hlen}:${header},${line}"; + return $line; +} my $tor = 0; my $tot = 0; @@ -94,57 +144,54 @@ my $ppline = ""; my $out = 0; my $pla = 0; - if ( ( defined $src ) && ( $src eq "socket" ) ) { - for $socket (@sockets) - { - socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; - connect( SOCK, sockaddr_un( $socket ) ) or die $!; - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$tpattern/ ) { - $tor += 1; - } elsif ( $line =~ /$pattern/ ) { - $tot += 1; - $enc += $2; - $line = ; - $line =~ /$pattern/; - $inc += $2; - } - $ppline = $pline; - $pline = $line; - } - close (SOCK); - $out = $out + $tot - $inc; - $pla = $pla + $tot - $enc; - } + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + print SOCK construct_line($socket); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$tpattern/ ) { + $tor += 1; + } elsif ( $line =~ /$pattern/ ) { + $tot += 1; + $enc += $2; + $line = ; + $line =~ /$pattern/; + $inc += $2; + } + $ppline = $pline; + $pline = $line; + } + close (SOCK); + $out = $out + $tot - $inc; + $pla = $pla + $tot - $enc; + } } else { - for $port (@ports) - { - socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); - connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$tpattern/ ) { - $tor += 1; - } elsif ( $line =~ /$pattern/ ) { - $tot += 1; - $enc += $2; - $line = ; - $line =~ /$pattern/; - $inc += $2; - } - $ppline = $pline; - $pline = $line; - } - close (SOCK); - $out = $out + $tot - $inc; - $pla = $pla + $tot - $enc; - } + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + print SOCK construct_line($port); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$tpattern/ ) { + $tor += 1; + } elsif ( $line =~ /$pattern/ ) { + $tot += 1; + $enc += $2; + $line = ; + $line =~ /$pattern/; + $inc += $2; + } + $ppline = $pline; + $pline = $line; + } + close (SOCK); + $out = $out + $tot - $inc; + $pla = $pla + $tot - $enc; + } } diff --git a/plugins/rtorrent/rtom_allsessions_spdd b/plugins/rtorrent/rtom_allsessions_spdd index ed89061a..aada54f5 100755 --- a/plugins/rtorrent/rtom_allsessions_spdd +++ b/plugins/rtorrent/rtom_allsessions_spdd @@ -54,35 +54,31 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { - my $diff = $ENV{"diff"} || ""; - my $category = $ENV{"category"} || ""; - print "graph_order down up\n"; - print "graph_title rTorrent speeds\n"; - print "graph_args --base 1024\n"; - print "graph_vlabel Bytes per \${graph_period}\n"; - print "graph_category filetransfer".${category}."\n"; - print "down.label Download B/s\n"; - print "down.info Download speed in Bytes per seconds\n"; - print "down.type DERIVE\n"; - print "down.min 0\n"; - print "down.draw AREA\n"; - if ( ( defined $diff ) && ( $diff eq 
"yes" ) ) { - print "up.label Upload b/s\n"; - print "up.info Upload speed in bits per seconds\n"; - print "up.cdef up,8,*\n"; - } else { - print "up.label Upload B/s\n"; - print "up.info Upload speed in Bytes per seconds\n"; - } - print "up.type DERIVE\n"; - print "up.min 0\n"; - print "up.draw LINE2\n"; - exit 0; + my $diff = $ENV{"diff"} || ""; + my $category = $ENV{"category"} || ""; + print "graph_order down up\n"; + print "graph_title rTorrent speeds\n"; + print "graph_args --base 1024\n"; + print "graph_vlabel Bytes per \${graph_period}\n"; + print "graph_category filetransfer".${category}."\n"; + print "down.label Download B/s\n"; + print "down.info Download speed in Bytes per seconds\n"; + print "down.type DERIVE\n"; + print "down.min 0\n"; + print "down.draw AREA\n"; + if ( ( defined $diff ) && ( $diff eq "yes" ) ) { + print "up.label Upload b/s\n"; + print "up.info Upload speed in bits per seconds\n"; + print "up.cdef up,8,*\n"; + } else { + print "up.label Upload B/s\n"; + print "up.info Upload speed in Bytes per seconds\n"; + } + print "up.type DERIVE\n"; + print "up.min 0\n"; + print "up.draw LINE2\n"; + exit 0; } use IO::Socket; @@ -91,63 +87,118 @@ my @sockets = split /,/, $ENV{"socket"} || ""; my $ip = $ENV{"ip"} || "127.0.0.1"; my @ports = split /,/, $ENV{"port"} || ""; -my $pattern = qr/<(int|i4|i8|ex\.i8)>([-]{0,1}\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; -my $line = "system.multicallmethodNameget_up_totalparamsmethodNameget_down_totalparamsmethodNameget_upload_rateparamsmethodNameget_download_rateparams"; -my $llen = length $line; -my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; -my $hlen = length $header; +# detect rtorrent version +use version; +my %rtorrent_version; +sub get_rtorrent_version { + my $version; + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $version = $1; + } + } + close (SOCK); + $rtorrent_version{$_[0]} = $version; +} +sub rtorrent_version_lower_than { + if (keys %rtorrent_version == 0 && not defined $_[0]){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + get_rtorrent_version $socket; + close (SOCK); + } + } else { + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + get_rtorrent_version $port; + close (SOCK); + } + } + } + if(defined $_[1]){ + return version->parse($rtorrent_version{$_[0]}) < version->parse($_[1]); + } +} +# init rtorrent_version +rtorrent_version_lower_than(); + +my $pattern = qr/<(int|i4|i8|ex\.i8)>([-]{0,1}\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; +sub construct_line { + my $function_totalup = rtorrent_version_lower_than($_[0], '0.9.0') ? 'get_up_total' : 'throttle.global_up.total'; + my $function_totaldown = rtorrent_version_lower_than($_[0], '0.9.0') ? 'get_down_total' : 'throttle.global_down.total'; + my $function_rateup = rtorrent_version_lower_than($_[0], '0.9.0') ? 'get_upload_rate' : 'throttle.global_up.max_rate'; + my $function_ratedown = rtorrent_version_lower_than($_[0], '0.9.0') ? 
'get_download_rate' : 'throttle.global_down.max_rate'; + my $line = "system.multicallmethodName$function_totalupparamsmethodName$function_totaldownparamsmethodName$function_rateupparamsmethodName$function_ratedownparams"; + + my $llen = length $line; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line = "${hlen}:${header},${line}"; + return $line; +} my $up = -1; my $down = -1; if ( ( defined $src ) && ( $src eq "socket" ) ) { - for $socket (@sockets) - { - socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; - connect( SOCK, sockaddr_un( $socket ) ) or die $!; - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - my $up_tmp = -1; - my $down_tmp = -1; - while (( $up_tmp == -1 ) && ( $line = ) ) { - if ( $line =~ /$pattern/ ) { - $up_tmp = $2; - } - } - while (( $down_tmp == -1 ) && ( $line = ) ) { - if ( $line =~ /$pattern/ ) { - $down_tmp = $2; - } - } - close (SOCK); - $up = $up + $up_tmp; - $down = $down + $down_tmp; - } + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + print SOCK construct_line($socket); + flush SOCK; + my $up_tmp = -1; + my $down_tmp = -1; + while (( $up_tmp == -1 ) && ( $line = ) ) { + if ( $line =~ /$pattern/ ) { + $up_tmp = $2; + } + } + while (( $down_tmp == -1 ) && ( $line = ) ) { + if ( $line =~ /$pattern/ ) { + $down_tmp = $2; + } + } + close (SOCK); + $up = $up + $up_tmp; + $down = $down + $down_tmp; + } } else { - for $port (@ports) - { - socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); - connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - my $up_tmp = -1; - my $down_tmp = -1; - while (( $up_tmp == -1 ) && ( $line = ) ) { - if ( $line =~ /$pattern/ ) { - $up_tmp = $2; - } - } - while (( $down_tmp == -1 ) && ( $line = ) ) { - if ( $line =~ /$pattern/ ) { - $down_tmp = $2; - } - } - close (SOCK); - $up = $up + $up_tmp; - $down = $down + $down_tmp; - } + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + print SOCK construct_line($port); + flush SOCK; + my $up_tmp = -1; + my $down_tmp = -1; + while (( $up_tmp == -1 ) && ( $line = ) ) { + if ( $line =~ /$pattern/ ) { + $up_tmp = $2; + } + } + while (( $down_tmp == -1 ) && ( $line = ) ) { + if ( $line =~ /$pattern/ ) { + $down_tmp = $2; + } + } + close (SOCK); + $up = $up + $up_tmp; + $down = $down + $down_tmp; + } } diff --git a/plugins/rtorrent/rtom_allsessions_vol b/plugins/rtorrent/rtom_allsessions_vol index 830575f4..5fce5eac 100755 --- a/plugins/rtorrent/rtom_allsessions_vol +++ b/plugins/rtorrent/rtom_allsessions_vol @@ -17,14 +17,14 @@ # # Parameters: # -# config required +# config required # # # Configurable variables # -# src "socket" when using scgi_socket, or anything else when using scgi_port -# socket rTorrent's rpc socket (scgi_local) - using scgi_local - needed, when "src" is set to "socket" -# category Change graph category +# src "socket" when using scgi_socket, or anything else when using scgi_port +# socket rTorrent's rpc socket (scgi_local) - using scgi_local - needed, when "src" is set to "socket" +# category Change graph category # # Configuration example # @@ -43,35 +43,31 @@ my @views = ( "default", "started", "stopped", "complete", "incomplete" ); -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { - 
my $category = $ENV{"category"} || ""; - print "graph_args --base 1000 -r --lower-limit 0\n"; - print "graph_title rTorrent volume\n"; - print "graph_vlabel active torrents\n"; - print "graph_category filetransfer".${category}."\n"; - print "complete.label complete\n"; - print "complete.draw AREA\n"; - print "complete.info complete torrents\n"; - print "incomplete.label incomplete\n"; - print "incomplete.draw STACK\n"; - print "incomplete.info incomplete torrents\n"; - print "stopped.label stopped\n"; - print "stopped.draw LINE2\n"; - print "stopped.info stopped torrents\n"; - print "started.label started\n"; - print "started.draw LINE2\n"; - print "started.info started torrents\n"; - print "default.label total\n"; - print "default.draw LINE2\n"; - print "default.info all torrents\n"; - print "hashing.graph no\n"; - print "seeding.graph no\n"; - print "active.graph no\n"; - exit 0; + my $category = $ENV{"category"} || ""; + print "graph_args --base 1000 -r --lower-limit 0\n"; + print "graph_title rTorrent volume\n"; + print "graph_vlabel active torrents\n"; + print "graph_category filetransfer".${category}."\n"; + print "complete.label complete\n"; + print "complete.draw AREA\n"; + print "complete.info complete torrents\n"; + print "incomplete.label incomplete\n"; + print "incomplete.draw STACK\n"; + print "incomplete.info incomplete torrents\n"; + print "stopped.label stopped\n"; + print "stopped.draw LINE2\n"; + print "stopped.info stopped torrents\n"; + print "started.label started\n"; + print "started.draw LINE2\n"; + print "started.info started torrents\n"; + print "default.label total\n"; + print "default.draw LINE2\n"; + print "default.info all torrents\n"; + print "hashing.graph no\n"; + print "seeding.graph no\n"; + print "active.graph no\n"; + exit 0; } use IO::Socket; @@ -81,47 +77,101 @@ my @sockets = split /,/, $ENV{"socket"} || ""; my $ip = $ENV{"ip"} || "127.0.0.1"; my @ports = split /,/, $ENV{"port"} || ""; -my $pattern = qr/([A-Z0-9]+)<\/string><\/value>/; +# detect rtorrent version +use version; +my %rtorrent_version; +sub get_rtorrent_version { + my $version; + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $version = $1; + } + } + close (SOCK); + $rtorrent_version{$_[0]} = $version; +} +sub rtorrent_version_lower_than { + if (keys %rtorrent_version == 0 && not defined $_[0]){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + get_rtorrent_version $socket; + close (SOCK); + } + } else { + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + get_rtorrent_version $port; + close (SOCK); + } + } + } + if(defined $_[1]){ + return version->parse($rtorrent_version{$_[0]}) < version->parse($_[1]); + } +} +# init rtorrent_version +rtorrent_version_lower_than(); -foreach ( @views ) { - my $num = 0; - my $line = "d.multicall${_}d.get_hash="; +my $pattern = qr/([A-Z0-9]+)<\/string><\/value>/; + +sub construct_line { + my $function_multicall = rtorrent_version_lower_than($_[0], '0.9.0') ? 
'd.multicall' : 'd.multicall2'; + my $function_multicall_arg = rtorrent_version_lower_than($_[0], '0.9.0') ? '' : ''; + my $function_hash = rtorrent_version_lower_than($_[0], '0.9.0') ? 'd.get_hash=' : 'd.hash='; + my $line = "$function_multicall$function_multicall_arg${_}$function_hash"; my $llen = length $line; my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; my $hlen = length $header; + $line = "${hlen}:${header},${line}"; + return $line; +} - if ( ( defined $src ) && ( $src eq "socket" ) ) { - for $socket (@sockets) - { - socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; - connect( SOCK, sockaddr_un( $socket ) ) or die $!; - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$pattern/ ) { - $num++; - } - } - close (SOCK); - } - } else { - for $port (@ports) - { - socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); - connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); - my $line = "${hlen}:${header},${line}"; - print SOCK $line; - flush SOCK; - while ( $line = ) { - if ( $line =~ /$pattern/ ) { - $num++; - } - } - close (SOCK); - } - } - print "${_}.value ${num}\n"; +foreach ( @views ) { + my $num = 0; + + if ( ( defined $src ) && ( $src eq "socket" ) ) { + for $socket (@sockets) + { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + print SOCK construct_line($socket); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $num++; + } + } + close (SOCK); + } + } else { + for $port (@ports) + { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + print SOCK construct_line($port); + flush SOCK; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $num++; + } + } + close (SOCK); + } + } + print "${_}.value ${num}\n"; } exit; diff --git a/plugins/rtorrent/rtom_mem b/plugins/rtorrent/rtom_mem index eb8a5850..63428052 100755 --- a/plugins/rtorrent/rtom_mem +++ b/plugins/rtorrent/rtom_mem @@ -34,7 +34,7 @@ # user username # env.src socket # env.socket /home/user/torrent/.socket/rpc.socket -# env.category Category +# env.category Category # # [rtom_mem] # env.ip 127.0.0.1 @@ -44,10 +44,6 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { my $category = $ENV{"category"} || ""; print "graph_title rTorrent memory usage\n"; @@ -68,9 +64,39 @@ my $ip = $ENV{"ip"} || "127.0.0.1"; my $port = $ENV{"port"} || "5000"; my $socket = $ENV{"socket"} || ""; -my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; +# detect rtorrent version +use version; +my $rtorrent_version; +sub rtorrent_version_lower_than { + if (not length $rtorrent_version){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + } else { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + } + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $rtorrent_version = $1; + } + } + close (SOCK); + } + return version->parse($rtorrent_version) < 
version->parse($_[0]); +} -my $line = "get_memory_usage"; +my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; +my $function = rtorrent_version_lower_than('0.9.0') ? 'get_memory_usage' : 'pieces.memory.current'; +my $line = "$function"; my $llen = length $line; my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; my $hlen = length $header; diff --git a/plugins/rtorrent/rtom_peers b/plugins/rtorrent/rtom_peers index dace07bb..29b55934 100755 --- a/plugins/rtorrent/rtom_peers +++ b/plugins/rtorrent/rtom_peers @@ -44,10 +44,6 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { my $category = $ENV{"category"} || ""; print "graph_title rTorrent peer statistics\n"; @@ -79,10 +75,43 @@ my $ip = $ENV{"ip"} || "127.0.0.1"; my $port = $ENV{"port"} || "5000"; my $socket = $ENV{"socket"} || ""; +# detect rtorrent version +use version; +my $rtorrent_version; +sub rtorrent_version_lower_than { + if (not length $rtorrent_version){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + } else { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + } + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $rtorrent_version = $1; + } + } + close (SOCK); + } + return version->parse($rtorrent_version) < version->parse($_[0]); +} + my $pattern = qr/<(int|i4|i8|ex\.i8)>(\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; my $tpattern = qr/[0-9A-F]{20}/; -my $line = "d.multicallmaind.get_hash=p.multicall=,p.is_encrypted=,p.is_incoming="; +my $function_multicall = rtorrent_version_lower_than('0.9.0') ? 'd.multicall' : 'd.multicall2'; +my $function_multicall_arg = rtorrent_version_lower_than('0.9.0') ? '' : ''; +my $function_hash = rtorrent_version_lower_than('0.9.0') ? 
'd.get_hash=' : 'd.hash='; +my $line = "$function_multicall$function_multicall_argmain$function_hashp.multicall=,p.is_encrypted=,p.is_incoming="; my $llen = length $line; my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; my $hlen = length $header; diff --git a/plugins/rtorrent/rtom_spdd b/plugins/rtorrent/rtom_spdd index cbb06c14..b4476e35 100755 --- a/plugins/rtorrent/rtom_spdd +++ b/plugins/rtorrent/rtom_spdd @@ -57,10 +57,6 @@ #%# family=auto -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { my $diff = $ENV{"diff"} || ""; my $category = $ENV{"category"} || ""; @@ -107,9 +103,43 @@ my $ip = $ENV{"ip"} || "127.0.0.1"; my $port = $ENV{"port"} || "5000"; my $socket = $ENV{"socket"} || ""; +# detect rtorrent version +use version; +my $rtorrent_version; +sub rtorrent_version_lower_than { + if (not length $rtorrent_version){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + } else { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + } + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $rtorrent_version = $1; + } + } + close (SOCK); + } + return version->parse($rtorrent_version) < version->parse($_[0]); +} my $pattern = qr/<(int|i4|i8|ex\.i8)>([-]{0,1}\d+)<\/(int|i4|i8|ex\.i8)><\/value>/; -my $line = "system.multicallmethodNameget_up_totalparamsmethodNameget_down_totalparamsmethodNameget_upload_rateparamsmethodNameget_download_rateparams"; +my $function_totalup = rtorrent_version_lower_than('0.9.0') ? 'get_up_total' : 'throttle.global_up.total'; +my $function_totaldown = rtorrent_version_lower_than('0.9.0') ? 'get_down_total' : 'throttle.global_down.total'; +my $function_rateup = rtorrent_version_lower_than('0.9.0') ? 'get_upload_rate' : 'throttle.global_up.max_rate'; +my $function_ratedown = rtorrent_version_lower_than('0.9.0') ? 
'get_download_rate' : 'throttle.global_down.max_rate'; +my $line = "system.multicallmethodName$function_totalupparamsmethodName$function_totaldownparamsmethodName$function_rateupparamsmethodName$function_ratedownparams"; + my $llen = length $line; my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; my $hlen = length $header; diff --git a/plugins/rtorrent/rtom_vol b/plugins/rtorrent/rtom_vol index f8e08e33..59f15136 100755 --- a/plugins/rtorrent/rtom_vol +++ b/plugins/rtorrent/rtom_vol @@ -45,10 +45,6 @@ my @views = ( "default", "started", "stopped", "complete", "incomplete" ); -if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) { - exit 1; -} - if ( $ARGV[0] and $ARGV[0] eq "config" ) { my $category = $ENV{"category"} || ""; print "graph_args --base 1000 -r --lower-limit 0\n"; @@ -83,6 +79,40 @@ my $ip = $ENV{"ip"} || "127.0.0.1"; my $port = $ENV{"port"} || "5000"; my $socket = $ENV{"socket"} || ""; +# detect rtorrent version +use version; +my $rtorrent_version; +sub rtorrent_version_lower_than { + if (not length $rtorrent_version){ + if ( ( defined $src ) && ( $src eq "socket" ) ) { + socket( SOCK, PF_UNIX, SOCK_STREAM, 0 ) or die; + connect( SOCK, sockaddr_un( $socket ) ) or die $!; + } else { + socket( SOCK, PF_INET, SOCK_STREAM, getprotobyname( "tcp" ) ); + connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); + } + my $line_version= "system.client_version"; + my $llen = length $line_version; + my $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; + my $hlen = length $header; + $line_version= "${hlen}:${header},${line_version}"; + print SOCK $line_version; + flush SOCK; + my $pattern = qr/([0-9.]+)<\/string><\/value>/; + while ( $line = ) { + if ( $line =~ /$pattern/ ) { + $rtorrent_version = $1; + } + } + close (SOCK); + } + if(defined $_[0]){ + return version->parse($rtorrent_version) < version->parse($_[0]); + } +} +# init rtorrent_version +rtorrent_version_lower_than(); + my $pattern = qr/([A-Z0-9]+)<\/string><\/value>/; my $line; @@ -90,6 +120,10 @@ my $llenmy; my $header; my $hlen; +my $function_multicall; +my $function_multicall_arg; +my $function_hash; + my $num; foreach ( @views ) { if ( ( defined $src ) && ( $src eq "socket" ) ) { @@ -100,7 +134,11 @@ foreach ( @views ) { connect( SOCK, sockaddr_in( $port, inet_aton( $ip ) ) ); } - $line = "d.multicall${_}d.get_hash="; + $function_multicall = rtorrent_version_lower_than('0.9.0')? 'd.multicall' : 'd.multicall2'; + $function_multicall_arg = rtorrent_version_lower_than('0.9.0') ? '' : ''; + $function_hash = rtorrent_version_lower_than('0.9.0')? 'd.get_hash=' : 'd.hash='; + $line = "$function_multicall$function_multicall_arg${_}$function_hash"; + $llen = length $line; $header = "CONTENT_LENGTH\000${llen}\000SCGI\001\000"; $hlen = length $header; diff --git a/plugins/s3/s3_items b/plugins/s3/s3_items index 44511240..1f716c65 100755 --- a/plugins/s3/s3_items +++ b/plugins/s3/s3_items @@ -3,19 +3,18 @@ use strict; -my $s3_id = exists $ENV{'s3_id'} ? $ENV{'s3_id'} : "user"; +my $s3_id = exists $ENV{'s3_id'} ? $ENV{'s3_id'} : "user"; my $s3cmd = 's3curl.pl --id ' . $s3_id . 
' http://s3.amazonaws.com/'; if ( $ARGV[0] eq "autoconf" ) { if (`/usr/bin/perl $0` eq "" ) { - print "no\n"; - exit 1; + print "no\n"; } else { - print "yes\n"; - exit 0; + print "yes\n"; } + exit 0; } @@ -29,14 +28,14 @@ sub get_bucket_list() { my @bucket_list = (); my $pos = 0; - + while ($str =~ /.([\w._-]+)<\/Name>/) { $bucket_list[$pos++] = $1; $str = $'; - } + } - return @bucket_list; + return @bucket_list; } @@ -47,16 +46,16 @@ my ($name) = @_; my $stats = `$s3cmd_local`; my %res; - - $res{'size'} = 0; - $res{'count'} = 0; - + + $res{'size'} = 0; + $res{'count'} = 0; + while ($stats =~ /.([\w._-]+)<\/Size>/) { $stats = $'; - + $res{'size'} += $1; $res{'count'}++; - } + } return %res; } @@ -69,32 +68,32 @@ if ( $ARGV[0] eq "config" ) { print "graph_category cloud\n"; print "graph_vlabel items\n"; print 'graph_info Plugin available at http://www.ohardt.com/dev/munin/' . "\n"; - + my @bucket_list = get_bucket_list(); - + my $bucket_name; - + foreach $bucket_name ( @bucket_list ) { - - print $bucket_name . ".label Bucket " . $bucket_name . "\n"; - - } + + print $bucket_name . ".label Bucket " . $bucket_name . "\n"; + + } exit; - - + + } my @bucket_list = get_bucket_list(); my $bucket_name; - + foreach $bucket_name ( @bucket_list ) { - - my %stats = get_bucket_stats( $bucket_name ); - + + my %stats = get_bucket_stats( $bucket_name ); + print $bucket_name . ".value " . $stats{'count'} . "\n"; } - \ No newline at end of file + diff --git a/plugins/s3/s3_storage b/plugins/s3/s3_storage index ac73a125..314590f1 100755 --- a/plugins/s3/s3_storage +++ b/plugins/s3/s3_storage @@ -3,7 +3,7 @@ use strict; use warnings; -my $s3_id = exists $ENV{'s3_id'} ? $ENV{'s3_id'} : "user"; +my $s3_id = exists $ENV{'s3_id'} ? $ENV{'s3_id'} : "user"; my $s3curl = "perl s3-curl/s3curl.pl --id $s3_id -- -s -S"; sub get_bucket_list() @@ -11,13 +11,13 @@ sub get_bucket_list() my $buckets = `$s3curl http://s3.amazonaws.com`; my $str = $buckets; my @bucket_list; - + while ($buckets =~ s/.([\w._-]+)<\/Name>//) { push @bucket_list, $1; - } + } - return @bucket_list; + return @bucket_list; } my @bucket_list = split /\s+/, ($ENV{'buckets'} || ''); @@ -28,16 +28,12 @@ if (not @bucket_list) if ($ARGV[0] and $ARGV[0] eq "autoconf") { - if (@bucket_list) - { + if (@bucket_list) { print "yes\n"; - exit 0; - } - else - { + } else { print "no\n"; - exit 1; } + exit 0; } sub get_bucket_stats @@ -45,9 +41,9 @@ sub get_bucket_stats my ($name) = @_; my $stats = `$s3curl http://$name.s3.amazonaws.com`; my %res; - + $res{'size'} = 0; - + while ($stats =~ s/.([\w._-]+)<\/Size>//) { $res{'size'} += $1; @@ -65,18 +61,18 @@ if ($ARGV[0] and $ARGV[0] eq "config") print "graph_args --base 1024 -l 0\n"; print "graph_vlabel bytes\n"; print "graph_info Plugin available at https://github.com/aptivate/munin-contrib/blob/master/plugins/s3/s3_storage\n"; - + foreach my $bucket_name (@bucket_list) { print "$bucket_name.label Bucket $bucket_name\n"; - } + } exit; } foreach my $bucket_name (@bucket_list) { - my %stats = get_bucket_stats($bucket_name); + my %stats = get_bucket_stats($bucket_name); print "$bucket_name.value " . $stats{'size'} . 
"\n"; } diff --git a/plugins/sabnzbd/sabnzbd_dataleft b/plugins/sabnzbd/sabnzbd_dataleft old mode 100644 new mode 100755 index 1f56ea8b..106e1739 --- a/plugins/sabnzbd/sabnzbd_dataleft +++ b/plugins/sabnzbd/sabnzbd_dataleft @@ -6,7 +6,7 @@ # # SABnzbd : http://sabnzbd.org/ # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. @@ -26,7 +26,7 @@ # [sabnzbd*] # env.host http://host:port/ # env.api apikey -# +# use strict; use XML::Simple; @@ -35,7 +35,7 @@ use LWP::UserAgent; #defines my $HOST = exists $ENV{'host'} ? $ENV{'host'} : "http://localhost:8080/"; my $API = exists $ENV{'api'} ? $ENV{'api'} : ""; -my $URL = $HOST."/sabnzbd/api?mode=qstatus&output=xml&apikey=".$API; +my $URL = $HOST."/sabnzbd/api?mode=queue&output=xml&apikey=".$API; my $sURL = sprintf $URL; #config output @@ -62,7 +62,7 @@ my $req = $get->get($sURL); my $vals = $req->content(); my $xmlvals = $xml->XMLin($vals); -#get/output vals +#get/output vals my $left = $xmlvals->{mbleft}; $left =~ /(\d+)\./; print "rem.value ".$1."\n"; diff --git a/plugins/sabnzbd/sabnzbd_speed b/plugins/sabnzbd/sabnzbd_speed old mode 100644 new mode 100755 index fb466c7b..bbd5b3f9 --- a/plugins/sabnzbd/sabnzbd_speed +++ b/plugins/sabnzbd/sabnzbd_speed @@ -6,7 +6,7 @@ # # SABnzbd : http://sabnzbd.org/ # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. @@ -26,7 +26,7 @@ # [sabnzbd*] # env.host http://host:port/ # env.api apikey -# +# use strict; use XML::Simple; @@ -35,7 +35,7 @@ use LWP::UserAgent; #defines my $HOST = exists $ENV{'host'} ? $ENV{'host'} : "http://localhost:8080/"; my $API = exists $ENV{'api'} ? $ENV{'api'} : ""; -my $URL = $HOST."/sabnzbd/api?mode=qstatus&output=xml&apikey=".$API; +my $URL = $HOST."/sabnzbd/api?mode=queue&output=xml&apikey=".$API; my $sURL = sprintf $URL; #config output @@ -65,4 +65,4 @@ my $xmlvals = $xml->XMLin($vals); #get/output vals my $speed = $xmlvals->{kbpersec}; $speed =~ /(\d+)\./; -print "speed.value ".$1."\n"; +print "speed.value ".$1."\n"; diff --git a/plugins/samba/samba_locked b/plugins/samba/samba_locked index d591510e..2f1ab9aa 100755 --- a/plugins/samba/samba_locked +++ b/plugins/samba/samba_locked @@ -8,16 +8,16 @@ # autoconf (optional - used by munin-config) # # $Log$ -# Revision 1.0 2007/04/16 Jon Higgs -# Initial Release - Adapted from jimmyo's processses plugin. +# Revision 1.0 2007/04/16 Jon Higgs +# Initial Release - Adapted from jimmyo's processes plugin. # -# Magick markers (optional - used by munin-config and som installation +# Magick markers (optional - used by munin-config and some installation # scripts): #%# family=auto #%# capabilities=autoconf if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -25,7 +25,7 @@ if [ "$1" = "config" ]; then echo 'graph_title Samba Locked Files' echo 'graph_args --base 1000 -l 0 ' - echo 'graph_vlabel number of locked files' + echo 'graph_vlabel number of locked files' echo 'graph_category fs' echo 'graph_info This graph shows the number locked Samba Files.' 
echo 'samba_locked.label Locked Files' diff --git a/plugins/samba/samba_users b/plugins/samba/samba_users index 4bf00d56..6aafd06b 100755 --- a/plugins/samba/samba_users +++ b/plugins/samba/samba_users @@ -8,22 +8,22 @@ # autoconf (optional - used by munin-config) # # $Log$ -# Revision 1.0 2007/04/16 Jon Higgs -# Initial Release - Adapted from jimmyo's processses plugin. +# Revision 1.0 2007/04/16 Jon Higgs +# Initial Release - Adapted from jimmyo's processes plugin. # # Revision 1.1 2014/07/24 MangaII # Add exit 0 # WARNING : Samba 3.6 and newer block access to smbstatus for no root user -# On Debian make a "chmod a+w /run/samba/sessionid.tdb" +# On Debian make a "chmod a+w /run/samba/sessionid.tdb" # smbstatus must open this file with RW option # -# Magick markers (optional - used by munin-config and som installation +# Magick markers (optional - used by munin-config and some installation # scripts): #%# family=auto #%# capabilities=autoconf if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -31,7 +31,7 @@ if [ "$1" = "config" ]; then echo 'graph_title Samba Users' echo 'graph_args --base 1000 -l 0 ' - echo 'graph_vlabel number of Samba users.' + echo 'graph_vlabel number of Samba users.' echo 'graph_category fs' echo 'graph_info This graph shows the number Samba users.' echo 'samba_users.label Samba Users' diff --git a/plugins/san/emc_comprehensive/emc_comprehensive b/plugins/san/emc_comprehensive/emc_comprehensive index 174944aa..e107408f 100755 --- a/plugins/san/emc_comprehensive/emc_comprehensive +++ b/plugins/san/emc_comprehensive/emc_comprehensive @@ -46,14 +46,14 @@ FILE: for my $file (@files) { my $object_name = shift @row; my $epoch = shift @row; my $owner_array_name = shift @row; - + # Ignore if too old next if ($epoch <= $spool_fetch_epoch); # Don't do too much work : 4h each time is enough $first_epoch ||= $epoch; next if $epoch > $first_epoch + 60 * 60 * 4; - + # Store Values for (my $idx = 0; $idx < $nb_headers; $idx ++) { @@ -66,7 +66,7 @@ FILE: for my $file (@files) { # Ignore non numeric values next unless $value =~ m/^[0-9.]+$/; - # Ignore Optimal/NonOptimal valuse + # Ignore Optimal/NonOptimal values next unless ($fieldset{$field_name} || $field_name !~ /[oO]ptimal/); $fieldset{$field_name} = 1 unless $fieldset{$field_name}; @@ -91,26 +91,26 @@ multigraph san graph_title Vue globale graph_order \ cpu_sp_a=san.cpu.sp_a \ - cpu_sp_b=san.cpu.sp_b + cpu_sp_b=san.cpu.sp_b EOF ; # CPU my @object_names = keys %$values; - gen_multigraph( - $values, + gen_multigraph( + $values, "cpu", [ grep { /^SP / } @object_names ], ); # DISK - gen_multigraph( + gen_multigraph( $values, "disk", [ grep { /^Bus \d+/ } @object_names ], ); # Port - gen_multigraph( + gen_multigraph( $values, "port", [ grep { /^Port / } @object_names ], @@ -118,7 +118,7 @@ EOF ); # Pool - gen_multigraph( + gen_multigraph( $values, "pool", [ grep { /^Pool / } @object_names ], @@ -130,17 +130,17 @@ multigraph san.lun graph_title Luns Global graph_order \ cpu_sp_a=san.cpu.sp_a \ - cpu_sp_b=san.cpu.sp_b + cpu_sp_b=san.cpu.sp_b EOF ; # LUN Per Host my %host_seen; - my @hosts = grep { $_ ne "" } grep { ! $host_seen{$_}++ } + my @hosts = grep { $_ ne "" } grep { ! 
$host_seen{$_}++ } map { $1 if /^\w+ \[\d+; (\w+)/ } @object_names; for my $host (@hosts) { my $host_field = $host; $host_field =~ tr/./_/; - gen_multigraph( + gen_multigraph( $values, "lun.$host_field", [ grep { /^\w+ \[\d+; $host/ } @object_names ], @@ -153,7 +153,7 @@ EOF last; } -sub gen_multigraph +sub gen_multigraph { my ($values, $category, $object_names, $convert_to_label, $convert_to_field) = @_; @@ -169,7 +169,7 @@ multigraph san.$category graph_title $category Global graph_order \ cpu_sp_a=san.cpu.sp_a \ - cpu_sp_b=san.cpu.sp_b + cpu_sp_b=san.cpu.sp_b EOF ; @@ -191,7 +191,7 @@ EOF } } -sub hash_field_name +sub hash_field_name { my $name = shift; $name = lc($name); @@ -212,7 +212,7 @@ sub trim sub file_mtime { my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, $atime,$mtime,$ctime,$blksize,$blocks) = stat(shift); - return $mtime; + return $mtime; } __DATA__ @@ -254,7 +254,7 @@ EOF print ".\n"; my %MONTHS = get_months(); -sub convert_to_epoch +sub convert_to_epoch { # converts "05/12/2011 03:57" to EPOCH my ($date, $time) = split(/ /); @@ -302,7 +302,7 @@ multigraph san graph_title Vue globale graph_order \ cpu_sp_a=san.cpu.cpu_sp_a \ - cpu_sp_b=san.cpu.cpu_sp_b + cpu_sp_b=san.cpu.cpu_sp_b multigraph san.cpu graph_title Utilization (%) cpu_sp_a.label Utilization (%) for SP A diff --git a/plugins/sar/iostat-cputps-average b/plugins/sar/iostat-cputps-average index bcbde46e..4f5e20b1 100755 --- a/plugins/sar/iostat-cputps-average +++ b/plugins/sar/iostat-cputps-average @@ -17,16 +17,14 @@ export LANG=en_US.UTF-8 if [ "$1" == "autoconf" ]; then if ( sar 1 1 >/dev/null 2>&1 ); then echo yes - exit 0 else if [ $? -eq 127 ]; then echo "no (could not run \"sar\")" - exit 1 else echo no - exit 1 fi fi + exit 0 fi ARRAY=( `sar -p -d -s ${tenMago} -e ${current} | grep -v nodev | grep "Average" | awk '{ print $2 , $3 , $10 }'` ) diff --git a/plugins/scalix/scalix_clients b/plugins/scalix/scalix_clients index a4bdfc88..d4880116 100755 --- a/plugins/scalix/scalix_clients +++ b/plugins/scalix/scalix_clients @@ -1,12 +1,12 @@ #!/bin/sh -# +# # Plugin to monitor the Scalix email system: Client statistics # # (C) 2008 P.Holzleitner # #%# family=contrib -OMSTAT="/opt/scalix/bin/omstat" +OMSTAT="/opt/scalix/bin/omstat" if [ "$1" = "config" ]; then diff --git a/plugins/scalix/scalix_indexwork b/plugins/scalix/scalix_indexwork index 2b1a0a57..4b53a052 100755 --- a/plugins/scalix/scalix_indexwork +++ b/plugins/scalix/scalix_indexwork @@ -1,12 +1,12 @@ #!/usr/bin/perl -# +# # Plugin to monitor the Scalix email system. 
# # $Log$ # # # Parameters: -# +# # config (required) # autoconf (optional - used by munin-config) # diff --git a/plugins/scalix/scalix_processes b/plugins/scalix/scalix_processes index 6c39052e..6a620c6e 100755 --- a/plugins/scalix/scalix_processes +++ b/plugins/scalix/scalix_processes @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the Scalix email system: Process statistics # # (C) 2008 P.Holzleitner diff --git a/plugins/scalix/scalix_queues b/plugins/scalix/scalix_queues index 50ea7607..802a44bc 100755 --- a/plugins/scalix/scalix_queues +++ b/plugins/scalix/scalix_queues @@ -1,5 +1,5 @@ #!/bin/sh -# +# # Plugin to monitor the Scalix email system: Queue statistics # # (C) 2008 P.Holzleitner @@ -7,7 +7,7 @@ # #%# family=contrib -OMSTAT="/opt/scalix/bin/omstat" +OMSTAT="/opt/scalix/bin/omstat" if [ "$1" = "config" ]; then diff --git a/plugins/security/fail2ban_ b/plugins/security/fail2ban_ old mode 100644 new mode 100755 diff --git a/plugins/security/forefront_ b/plugins/security/forefront_ index 951db439..47ce449d 100755 --- a/plugins/security/forefront_ +++ b/plugins/security/forefront_ @@ -2,8 +2,8 @@ # # Plugin to monitor Forefront Client Security status in MOM database # -# Copyright (c) 2008 Rune Nordbe Skillingstad - -# +# Copyright (c) 2008 Rune Nordbøe Skillingstad - +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 dated June, 1991. @@ -18,17 +18,17 @@ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, # USA. # -# Parameters: +# Parameters: # # config # autoconf # suggest # # Config variables -# +# # dsn - If DSN name differs from hostname # dbuser - Valid MS SQL user (Windows authentication is possible using "DOMAIN\user") -# dbpass - Password +# dbpass - Password # # Install guide: # This plugin relies on correct configured ODBC for the MOM database @@ -36,28 +36,28 @@ # * Install and configure FreeTDS and DBD::Sybase (packages tdsodbc and libdbd-sybase-perl on Ubuntu) # - DBD::Sybase is preferred over ODBC because of strange TEXT field handling in DBD::ODBC # -# Example +# Example # /etc/freetds/freetds.conf: # [MyHost] # host = MyHost.domain.tld # port = 1433 # tds version = 7.0 -# +# # Copy this script to /usr/share/munin/plugins and run "munin-node-configure --shell" # If freetds.conf has one or more lines containing "host = ", the output will be something like this: # ln -s /usr/share/munin/plugins/forefront_ /etc/munin/plugins/forefront_MyHost.Domain.tld_computers # ln -s /usr/share/munin/plugins/forefront_ /etc/munin/plugins/forefront_MyHost.domain.tld_deployments # ln -s /usr/share/munin/plugins/forefront_ /etc/munin/plugins/forefront_MyHost.domain.tld_status # -# To manually add, symlink forefront_ to forefront_MyHost.domain.tld_computers, -# forefront_MyHost.domain.tld_deployments and forefront_MyHost.domain.tld_status +# To manually add, symlink forefront_ to forefront_MyHost.domain.tld_computers, +# forefront_MyHost.domain.tld_deployments and forefront_MyHost.domain.tld_status # # Add your DSN and user/password to /etc/munin/plugin-conf.d/munin-node: # [forefront_MyHost.domain.tld_*] # env.dsn MyHost # env.dbuser # env.dbpass -# +# # On your munin server, add this to /etc/munin/munin.conf # # [MyHost.domain.tld] @@ -101,10 +101,10 @@ if(!eval "require MIME::Base64;") { if($ARGV[0] and $ARGV[0] eq "autoconf") { if($ret) { - print "no ($ret)\n"; - exit 1; + print "no ($ret)\n"; + } else { + print "yes\n"; } 
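+    # autoconf always exits 0; the "yes"/"no" text printed above is the
+    # answer that munin-node-configure evaluates.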
- print "yes\n"; exit 0; } @@ -211,7 +211,7 @@ EOF my %profiles = &deployments_general(); foreach my $policy (sort(keys(%profiles))) { my $field = encode_base64($policy); - chomp($field); + chomp($field); print $field . ".label " . $profiles{$policy}{'name'} . "\n"; print $field . ".draw LINE2\n"; print $field . ".info Numer of computers having the " .$profiles{$policy}{'name'} . " profile.\n"; @@ -268,7 +268,7 @@ sub status_fetch { while(my @row = $sth->fetchrow_array) { $alerts{$row[0]} = 0; } - $sth = $dbh->prepare("SELECT al.Level, COUNT(a.AlertLevel) FROM Alert a, AlertLevel al WHERE a.AlertLevel = al.Level AND a.ResolutionState <> 255 GROUP BY al.Level", + $sth = $dbh->prepare("SELECT al.Level, COUNT(a.AlertLevel) FROM Alert a, AlertLevel al WHERE a.AlertLevel = al.Level AND a.ResolutionState <> 255 GROUP BY al.Level", {odbc_exec_direct => 1}); $sth->execute(); while(my @row = $sth->fetchrow_array) { @@ -300,7 +300,7 @@ sub deployments_general { $profiles{'d3b75be9-7125-4db1-8b24-93004bd9d88e'}{'instance'} = ""; $profiles{'d3b75be9-7125-4db1-8b24-93004bd9d88e'}{'count'} = 0; my $dbh = DBI->connect("DBI:Sybase:$dsn", $dbuser, $dbpass, { PrintError => 1, AutoCommit => 1}); - + my $sth = $dbh->prepare("SELECT Id, Name, LatestInstanceID From fcs_Profiles", {odbc_exec_direct => 1}); $sth->execute(); while(my @row = $sth->fetchrow_array) { diff --git a/plugins/senderscore/senderscore b/plugins/senderscore/senderscore old mode 100644 new mode 100755 index 8f5e319f..a7c2886f --- a/plugins/senderscore/senderscore +++ b/plugins/senderscore/senderscore @@ -2,7 +2,7 @@ # # Here's a plugin which lets you monitor your senderscore.org reputation. For # people sending a large amount of email - this might be useful. -# +# # Config env var used: # [senderscore] # env.senderscore_check_ip xxx.yyy.zzz.ttt @@ -44,7 +44,7 @@ def print_config(): graph_title senderscore reputation graph_info This graph shows senderscore.org reputation metrics graphs_args --upper-limit 100 -u 100 -l 0 -graph_category other +graph_category spamfilter graph_vlabel score graph_scale no @@ -75,7 +75,7 @@ if __name__ == "__main__": if os.environ['senderscore_check_ip'] == '': print "env.senderscore_check_ip not defined in munin config" sys.exit(1) - + ip = os.environ['senderscore_check_ip'] if len(sys.argv) > 1 and sys.argv[1] != '': diff --git a/plugins/sendmail/sendmail_mailq b/plugins/sendmail/sendmail_mailq old mode 100644 new mode 100755 index 64139226..e2bd69f7 --- a/plugins/sendmail/sendmail_mailq +++ b/plugins/sendmail/sendmail_mailq @@ -46,7 +46,7 @@ Jose-Marcio Martins da Cruz - mailto:Jose-Marcio.Martins@mines-paristech.fr Ecole Nationale Superieure des Mines de Paris -=head1 VERSION +=head1 VERSION 1.0 - Jan, 04, 2014 diff --git a/plugins/sensors/alertme_keyfobsathome b/plugins/sensors/alertme_keyfobsathome index 1fbe3a1b..35179606 100755 --- a/plugins/sensors/alertme_keyfobsathome +++ b/plugins/sensors/alertme_keyfobsathome @@ -4,7 +4,7 @@ # alertme_power relies on http://code.google.com/p/alertmepi/ # to have been installed and working # -# 20110120 - update to use correct mixed case for AlertMe and remove cruft +# 20110120 - update to use correct mixed case for AlertMe and remove cruft # left over from creating this plugin from another script # # add to the plugins-conf.d/munin so that it can read the /etc/alertme files @@ -26,7 +26,7 @@ echo -n "KeyfobsAtHome.value " KFC=`/usr/local/bin/alertmepi.pl -k | wc -l` if [ $? 
-eq 0 ] ; then - echo $KFC + echo $KFC else echo U fi diff --git a/plugins/sensors/alertme_power b/plugins/sensors/alertme_power index d64aec40..303d03c8 100755 --- a/plugins/sensors/alertme_power +++ b/plugins/sensors/alertme_power @@ -4,7 +4,7 @@ # alertme_power relies on http://code.google.com/p/alertmepi/ # to have been installed and working # -# 20110120 - update to use correct mixed case for AlertMe and remove cruft +# 20110120 - update to use correct mixed case for AlertMe and remove cruft # left over from creating this plugin from another script # # add to the plugins-conf.d/munin so that it can read the /etc/alertme files diff --git a/plugins/sensors/allnet__ b/plugins/sensors/allnet__ index d27d309f..feec5c13 100755 --- a/plugins/sensors/allnet__ +++ b/plugins/sensors/allnet__ @@ -77,7 +77,7 @@ use XML::Simple; { package AuthAgent; use base 'LWP::UserAgent'; - + sub get_basic_credentials { if (defined($ENV{'username'}) && defined($ENV{'password'})) { return $ENV{'username'}, $ENV{'password'}; @@ -166,7 +166,7 @@ if (defined($ENV{'beextratolerant'})) { } } #print($sensordata->[0]); -foreach $k (keys($sensordata)) { +foreach $k (keys(%$sensordata)) { if ($k =~ m/^n(\d+)$/) { # Special handling: Could be output from the OLD XML interface. my $nr = $1; if (defined($sensordata->{'s'.$nr}) diff --git a/plugins/sensors/hwmon b/plugins/sensors/hwmon index 66e44801..1a890c87 100755 --- a/plugins/sensors/hwmon +++ b/plugins/sensors/hwmon @@ -94,6 +94,13 @@ my %sensors = ( graph_args => "--base 1000 -l 0", denominator => 1000000 # microWatts -> Watts }, + freq => { + inputs => [], + title => "Frequencies", + vlabel => "Hz", + graph_args => "--base 1000 -l 0", + denominator => 1 + }, humidity => { inputs => [], title => "Humidity", diff --git a/plugins/sensors/ip_thermo_125 b/plugins/sensors/ip_thermo_125 index 01dbe765..3ba4fce3 100755 --- a/plugins/sensors/ip_thermo_125 +++ b/plugins/sensors/ip_thermo_125 @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Copyright (C) 2008 Yuriy Sabov -# Version 0.1 +# Version 0.1 # # Plugin to fetch temperature from "IP Thermo 125" ethernet thermometer # available at http://www.procontrol.hu/GyartasFejlesztes/Termekeink/IPThermoSimple/IPThermo125_eng.htm @@ -18,7 +18,7 @@ my ($hostname, $port, $line, $telnet); # "C" = Celsius, "F" = Fahrenheit -my $unit = $ENV{unit} || "C"; +my $unit = $ENV{unit} || "C"; $hostname = "10.10.10.10"; $port = 23; @@ -38,7 +38,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") print "graph_args --base 1000 -l 0\n"; print "graph_category sensors\n"; print "graph_info This graph shows temperature using IP Thermo 125 server.\n"; - + if ($unit =~ /F/) { print "graph_vlabel temp in °F\n"; @@ -48,8 +48,8 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") print "graph_vlabel temp in °C\n"; } print "temperature.label temperature\n"; - - exit 0 + + exit 0; } $telnet = new Net::Telnet (Telnetmode => 0); diff --git a/plugins/sensors/mbmon b/plugins/sensors/mbmon index da8c4480..d19835d9 100755 --- a/plugins/sensors/mbmon +++ b/plugins/sensors/mbmon @@ -19,34 +19,33 @@ my $mbmon = "/usr/local/bin/mbmon"; if ($ARGV[0] eq 'autoconf') { if (-x $mbmon) { print "yes\n"; - exit 0; } else { print "no\n"; - exit 1; } + exit 0; } elsif ($ARGV[0] eq 'config') { print < -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
-# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. @@ -53,7 +53,7 @@ Nils Henrik Tvetene GPLv2 -=cut +=cut use strict; use warnings; diff --git a/plugins/sfsnmp/snmp__sfsnmp_temp b/plugins/sfsnmp/snmp__sfsnmp_temp index 9161b6a9..a40de986 100755 --- a/plugins/sfsnmp/snmp__sfsnmp_temp +++ b/plugins/sfsnmp/snmp__sfsnmp_temp @@ -5,17 +5,17 @@ # Copyright (C) 2010 Nils Henrik Tvetene # # Author: Nils Henrik Tvetene -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. @@ -53,7 +53,7 @@ Nils Henrik Tvetene GPLv2 -=cut +=cut use strict; use warnings; diff --git a/plugins/sfsnmp/snmp__sfsnmp_volt b/plugins/sfsnmp/snmp__sfsnmp_volt index ecb0c7f6..4c274bdd 100755 --- a/plugins/sfsnmp/snmp__sfsnmp_volt +++ b/plugins/sfsnmp/snmp__sfsnmp_volt @@ -5,17 +5,17 @@ # Copyright (C) 2010 Nils Henrik Tvetene # # Author: Nils Henrik Tvetene -# +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. @@ -53,7 +53,7 @@ Nils Henrik Tvetene GPLv2 -=cut +=cut use strict; use warnings; diff --git a/plugins/sge/sge_queue_ b/plugins/sge/sge_queue_ index 791ee9ae..341981d6 100755 --- a/plugins/sge/sge_queue_ +++ b/plugins/sge/sge_queue_ @@ -22,7 +22,7 @@ # # env.sge_settings - Path to SGE settings.sh script, defaults to /opt/sge/default/common/settings.sh # env.title - Graph title, overrides "SGE Queue state". -# env.options - Additional command line options to qstat. +# env.options - Additional command line options to qstat. 
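For reference, the environment variables documented above (env.sge_settings, env.title, env.options) would normally be set in a munin-node plugin configuration stanza, in the same style as the examples used elsewhere in this repository. A minimal, hypothetical sketch — the settings.sh path is simply the documented default, and the queue name in the symlink is a placeholder:

    # /etc/munin/plugin-conf.d/sge  (hypothetical file name)
    [sge_queue_*]
    env.sge_settings /opt/sge/default/common/settings.sh
    env.title SGE Queue state

    # one symlink per queue to monitor, e.g. for a queue named "all.q":
    # ln -s /usr/share/munin/plugins/sge_queue_ /etc/munin/plugins/sge_queue_all.q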
# # Revisions: # v1.0 2009-07-19 @@ -92,18 +92,17 @@ fi if [ "$1" = "autoconf" ]; then if which qstat > /dev/null; then echo "yes" - exit 0 - else + else echo "no" - exit 1 fi + exit 0 fi # env.options OPTIONS="-g c -q $QUEUE $options" -# qstat -g c example output: -# CLUSTER QUEUE CQLOAD USED RES AVAIL TOTAL aoACDS cdsuE +# qstat -g c example output: +# CLUSTER QUEUE CQLOAD USED RES AVAIL TOTAL aoACDS cdsuE # -------------------------------------------------------------------------------- # all.q 0.00 16 8 48 64 0 0 qstat $OPTIONS | tail -n 1 | awk '{print "unavailable.value " $8 "\nreserved.value " $4 "\nused.value " $3 "\nfree.value " $5-$4}' diff --git a/plugins/sge/sge_queue_xml_ b/plugins/sge/sge_queue_xml_ old mode 100644 new mode 100755 index b20d088f..6fe02f3b --- a/plugins/sge/sge_queue_xml_ +++ b/plugins/sge/sge_queue_xml_ @@ -21,7 +21,7 @@ # # env.sge_settings - Path to SGE settings.sh script, defaults to /opt/sge/default/common/settings.sh # env.title - Graph title, overrides "SGE Queue state". -# env.options - Additional command line options to qstat. +# env.options - Additional command line options to qstat. # env.queues - list of queues to summarize # # Revisions: @@ -35,7 +35,7 @@ SGE_SETTINGS=${sge_settings:-/opt/sge/default/common/settings.sh} -# queues to monitor +# queues to monitor # priority 1: queue name in symlink QUEUE=${0##*_} # priority 2: queue names from environment @@ -91,18 +91,17 @@ XMLSTARLET=$( which xmlstarlet ) if [ "$1" = "autoconf" ]; then if [ -n "$QSTAT" -a -n "$XMLSTARLET" ]; then echo "yes" - exit 0 - else + else echo "no" - exit 1 fi + exit 0 fi -# check requirements -[ -z "$QSTAT" ] && { echo "qstat not found" 1>&2 ; exit 1; } -[ -z "$XMLSTARLET" ] && { echo "xmlstarlet not found" 1>&2 ; exit 1; } +# check requirements +[ -z "$QSTAT" ] && { echo "qstat not found" 1>&2 ; exit 1; } +[ -z "$XMLSTARLET" ] && { echo "xmlstarlet not found" 1>&2 ; exit 1; } -ALL_QUEUES=$( $QSTAT -g c -xml | $XMLSTARLET sel -T -t -m "//cluster_queue_summary/name" -v "node()" -o "," ) +ALL_QUEUES=$( $QSTAT -g c -xml | $XMLSTARLET sel -T -t -m "//cluster_queue_summary/name" -v "node()" -o "," ) [ "$QUEUE" == "Summary" ] && QUEUE="$ALL_QUEUES" if [ "$1" = "suggest" ]; then @@ -130,7 +129,7 @@ xmldemangle() { printvalues() { local IFSBAK=$IFS; unset IFS for i in ${!_SGE_QUEUE_KEYS[@]} - do + do echo "${_SGE_QUEUE_KEYS[$i]}.value ${_SGE_QUEUE_VALUES[$i]}" done IFS=$IFSBAK @@ -153,7 +152,7 @@ do xmldemangle || echo "Error on QUEUE: $qu" 1>&2 for i in ${!_SGE_QUEUE_KEYS[@]} - do + do _SGE_QUEUE_VALUES[$i]=${_SGE_QUEUE_VALUES[$i]:-0} let "_SGE_QUEUE_VALUES[$i] += ${_SGE_XML[$i]:-0}" done diff --git a/plugins/shoutcast/shoutcast b/plugins/shoutcast/shoutcast index 386bf225..b27bb3c7 100755 --- a/plugins/shoutcast/shoutcast +++ b/plugins/shoutcast/shoutcast @@ -85,7 +85,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { print "ax_used_connections.label Peak Listeners\n"; print "ax_used_connections.type GAUGE\n"; - # DJ-Online tag must be eq to Peak Listeners + # DJ-Online tag must be eq to Peak Listeners print "djonline_tag.draw AREA\n"; print "djonline_tag.colour e5ff60\n"; print "djonline_tag.min 0\n"; diff --git a/plugins/shoutcast/shoutcast2_multi b/plugins/shoutcast/shoutcast2_multi index 5f922ba1..e2bc79f3 100755 --- a/plugins/shoutcast/shoutcast2_multi +++ b/plugins/shoutcast/shoutcast2_multi @@ -186,7 +186,7 @@ sub main { =head2 print_active_data - Thie subroutine prints out the active graph values for each stream and ultimately for + The subroutine prints out the 
active graph values for each stream and ultimately for the entire shoutcast service. Should 1 Stream be active, but 5 streams available, the global graph should show the state as active for the service, but clicking into that graph, should give you a stream level view of which stream was in use during @@ -200,7 +200,7 @@ sub print_active_data { foreach my $sid (sort keys %{$sidDataRef}) { print "multigraph shoutcast2_active.active_sid_$sid\n"; foreach my $dsrc (@{$graphsRef->{sid_active}->{datasrc}}) { - print "$dsrc->{name}.value $sidDataRef->{$sid}->{$dsrc->{xmlkey}}\n"; + print "$dsrc->{name}.value $sidDataRef->{$sid}->{$dsrc->{xmlkey}}\n"; if ($sidDataRef->{$sid}->{$dsrc->{xmlkey}} == 1) { $globalActive = 1; } @@ -216,7 +216,7 @@ sub print_active_data { =head2 print_listener_data This subroutine prints out the listener graph values for each stream and ultimately - adds all of the current users together to show that against the maxserver count in + adds all of the current users together to show that against the maxserver count in the global graph. Clicking on the global graph will reveal a bit more information about the users on a stream by stream basis. @@ -228,7 +228,7 @@ sub print_listener_data { foreach my $sid (sort keys %{$sidDataRef}) { print "multigraph shoutcast2_listeners.listeners_sid_$sid\n"; foreach my $dsrc (@{$graphsRef->{sid_listeners}->{datasrc}}) { - print "$dsrc->{name}.value $sidDataRef->{$sid}->{$dsrc->{xmlkey}}\n"; + print "$dsrc->{name}.value $sidDataRef->{$sid}->{$dsrc->{xmlkey}}\n"; if ($dsrc->{name} eq 'currlisteners') { $globalListeners += $sidDataRef->{$sid}->{$dsrc->{xmlkey}}; } @@ -283,7 +283,7 @@ sub config { } print_active_config($sidDataRef); print_listener_config($sidDataRef); - return; + return; } =head2 print_active_config @@ -409,7 +409,7 @@ sub fetch_sid_data { my $response = $ua->get($url); if ($response->is_success) { my $returnRef = XMLin($response->decoded_content); - return (1, $returnRef); + return (1, $returnRef); } else { return (0, $response->status_line); } @@ -431,7 +431,7 @@ sub fetch_admin_data { if ($response->is_success) { my $returnRef = XMLin($response->decoded_content); if (($returnRef->{STREAMCONFIGS}->{TOTALCONFIGS} > 0) && (defined($returnRef->{STREAMCONFIGS}->{STREAMCONFIG}))) { - return (1, $returnRef); + return (1, $returnRef); } else { return (0, 'Unable to Detect any Stream Configurations'); } diff --git a/plugins/sickbeard/sickbeard b/plugins/sickbeard/sickbeard new file mode 100755 index 00000000..5c3d968d --- /dev/null +++ b/plugins/sickbeard/sickbeard @@ -0,0 +1,123 @@ +#!/usr/bin/perl +# -*- perl -*- + +=head1 NAME + +sickbeard - Munin multigraph plugin for Sick-Beard and forks (e.g., +SickChill) + +=head1 DESCRIPTION + +This plugin uses the Sick-Beard API (also implemented by forks such as +SickChill) to collect and report on the numbers of shows (total and +active), and episodes (downloaded, snatched, and all) currently tracked +by the app. + +It is a drop-in replacement for sickbeard_shows and sickbeard_episodes, +reusing the vast majority of the code, and providing the same graphs and +data series. + +Sick-Beard : http://sickbeard.com/ + +SickChill : https://sickchill.github.io/ + +=head1 REQUIREMENTS + +JSON::Any, LWP::UserAgent + +=head1 CONFIGURATION + +You need to specify the host/port of the Sick-Beard as well as API key +to use. 
+ + [sickbeard*] + env.host http://host:port/ + env.api apikey + +=head1 AUTHORS + +Copyright (C) 2012 - Blauwbek + +Copyright (C) 2012 - Thiago + +Copyright (C) 2019 - Olivier Mehani , for the +multigraph support and better error handling + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +=cut + +use strict; +use JSON::Any; +use LWP::UserAgent; + +#defines +my $HOST = exists $ENV{'host'} ? $ENV{'host'} : "http://localhost:8081/"; +my $API = exists $ENV{'api'} ? $ENV{'api'} : ""; +my $URL = $HOST."/api/".$API."/?cmd=shows.stats"; +my $sURL = sprintf $URL; + +#config output +if(defined $ARGV[0] && $ARGV[0] eq 'config') +{ + print <new; +my $req = $get->get($sURL); +if (!$req->is_success) { + die $req->status_line; +} +my $json = JSON::Any->jsonToObj($req->content()); + +if ($json->{result} eq 'success') { + print "multigraph sickbeard_episodes\n"; + print "total.value $json->{data}->{ep_total}\n"; + print "down.value $json->{data}->{ep_downloaded}\n"; + print "snatched.value $json->{data}->{ep_snatched}\n"; + + print "multigraph sickbeard_shows\n"; + print "total.value $json->{data}->{shows_total}\n"; + print "active.value $json->{data}->{shows_active}\n"; + exit 0; +} else { + die "$json->{message}"; +} diff --git a/plugins/sickbeard/sickbeard_episodes b/plugins/sickbeard/sickbeard_episodes index 8b297006..4670d75d 100755 --- a/plugins/sickbeard/sickbeard_episodes +++ b/plugins/sickbeard/sickbeard_episodes @@ -7,7 +7,7 @@ # # Sick-Beard : http://sickbeard.com/ # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. @@ -27,7 +27,7 @@ # [sickbeard*] # env.host http://host:port/ # env.api apikey -# +# use strict; use JSON::Any; @@ -45,10 +45,13 @@ if(defined $ARGV[0] && $ARGV[0] eq 'config') print <get($sURL); my $json = JSON::Any->jsonToObj($req->content()); if ($json->{result} eq 'success') { + print "total.value $json->{data}->{ep_total}\n"; print "down.value $json->{data}->{ep_downloaded}\n"; print "snatched.value $json->{data}->{ep_snatched}\n"; - print "total.value $json->{data}->{ep_total}\n"; exit 0; } else { print "$json->{message}\n"; diff --git a/plugins/sickbeard/sickbeard_shows b/plugins/sickbeard/sickbeard_shows index 9d334f50..4b3db267 100755 --- a/plugins/sickbeard/sickbeard_shows +++ b/plugins/sickbeard/sickbeard_shows @@ -7,7 +7,7 @@ # # Sick-Beard : http://sickbeard.com/ # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
@@ -27,7 +27,7 @@ # [sickbeard*] # env.host http://host:port/ # env.api apikey -# +# use strict; use JSON::Any; @@ -45,9 +45,11 @@ if(defined $ARGV[0] && $ARGV[0] eq 'config') print <get($sURL); my $json = JSON::Any->jsonToObj($req->content()); if ($json->{result} eq 'success') { - print "active.value $json->{data}->{shows_active}\n"; print "total.value $json->{data}->{shows_total}\n"; + print "active.value $json->{data}->{shows_active}\n"; exit 0; } else { print "$json->{message}\n"; diff --git a/plugins/smf/smf_errors b/plugins/smf/smf_errors old mode 100644 new mode 100755 index 5a2dc249..1aeccdee --- a/plugins/smf/smf_errors +++ b/plugins/smf/smf_errors @@ -1,6 +1,6 @@ #!/usr/bin/perl # -# Munin plugin for erorrs count over a SMF forum database +# Munin plugin for errors count over a SMF forum database # # Copyright (C) 2013 - digger (http://simplemachines.ru) # Based on Rowdy Schwachfer (http://rowdy.nl) 's Spotweb plugin @@ -53,5 +53,5 @@ EOC #Errors count my $errors = `$MYSQL $MYSQLOPTS -e 'SELECT COUNT(*) FROM ${DATABASE}.${PREFIX}log_errors'`; -$errors =~ /(\d+)/; +$errors =~ /(\d+)/; print "errors.value ".$1."\n"; diff --git a/plugins/smf/smf_mail_queue b/plugins/smf/smf_mail_queue old mode 100644 new mode 100755 index 99def0a7..4d40c605 --- a/plugins/smf/smf_mail_queue +++ b/plugins/smf/smf_mail_queue @@ -53,5 +53,5 @@ EOC #Emails count my $emails = `$MYSQL $MYSQLOPTS -e 'SELECT COUNT(*) FROM ${DATABASE}.${PREFIX}mail_queue'`; -$emails =~ /(\d+)/; +$emails =~ /(\d+)/; print "emails.value ".$1."\n"; diff --git a/plugins/smf/smf_online b/plugins/smf/smf_online old mode 100644 new mode 100755 index e9e609b7..2739e5a2 --- a/plugins/smf/smf_online +++ b/plugins/smf/smf_online @@ -65,12 +65,12 @@ EOC #Guests count my $guests = `$MYSQL $MYSQLOPTS -e 'SELECT COUNT(*) FROM ${DATABASE}.${PREFIX}log_online WHERE id_member = 0 AND id_spider = 0'`; -$guests =~ /(\d+)/; +$guests =~ /(\d+)/; print "guests.value ".$1."\n"; #Spiders count my $spiders = `$MYSQL $MYSQLOPTS -e 'SELECT COUNT(*) FROM ${DATABASE}.${PREFIX}log_online WHERE id_spider > 0'`; -$spiders =~ /(\d+)/; +$spiders =~ /(\d+)/; print "spiders.value ".$1."\n"; #Users count diff --git a/plugins/smf/smf_stats b/plugins/smf/smf_stats old mode 100644 new mode 100755 index 7e25f691..47758732 --- a/plugins/smf/smf_stats +++ b/plugins/smf/smf_stats @@ -59,12 +59,12 @@ EOC #Members count my $members = `$MYSQL $MYSQLOPTS -e 'SELECT value FROM ${DATABASE}.${PREFIX}settings WHERE variable = "totalMembers"'`; -$members =~ /(\d+)/; +$members =~ /(\d+)/; print "members.value ".$1."\n"; #Messages count my $messages = `$MYSQL $MYSQLOPTS -e 'SELECT value FROM ${DATABASE}.${PREFIX}settings WHERE variable="totalMessages"'`; -$messages =~ /(\d+)/; +$messages =~ /(\d+)/; print "messages.value ".$1."\n"; #Topics count diff --git a/plugins/smstools/smstools_ b/plugins/smstools/smstools_ index 56e7183a..2f41f5e6 100755 --- a/plugins/smstools/smstools_ +++ b/plugins/smstools/smstools_ @@ -16,7 +16,7 @@ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# +# # Plugin to monitor an smstools installation # # Usage: Link or copy into the munin-node plugin directory, name it according @@ -58,11 +58,10 @@ MODEM=`basename $0 | sed 's/^smstools_//g'` if [ "$1" = "autoconf" ]; then if [ -d $STATSDIR ]; then echo yes - exit 0 else echo "no ($STATSDIR not found)" - exit 1 fi + exit 0 fi if [ "$1" = "suggest" ]; then @@ -85,14 +84,14 @@ fi # If run with the "config"-parameter, give out information on how the -# graphs should look. 
- +# graphs should look. + if [ "$1" = "config" ]; then echo 'graph_title SMSTools Report for '${MODEM} echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel ' echo 'graph_scale no' - echo 'graph_category other' + echo 'graph_category chat' echo 'graph_info SMSTools Statistics' echo 'graph_oder succeeded received failed multiple_failed rejected' echo 'rejected.label Rejected' diff --git a/plugins/snmp/snmp__fn/snmp__fn-cpu.png b/plugins/snmp/example-graphs/snmp__fn-cpu.png similarity index 100% rename from plugins/snmp/snmp__fn/snmp__fn-cpu.png rename to plugins/snmp/example-graphs/snmp__fn-cpu.png diff --git a/plugins/snmp/snmp__fn/snmp__fn-memory.png b/plugins/snmp/example-graphs/snmp__fn-memory.png similarity index 100% rename from plugins/snmp/snmp__fn/snmp__fn-memory.png rename to plugins/snmp/example-graphs/snmp__fn-memory.png diff --git a/plugins/snmp/snmp__fn/snmp__fn-sessions.png b/plugins/snmp/example-graphs/snmp__fn-sessions.png similarity index 100% rename from plugins/snmp/snmp__fn/snmp__fn-sessions.png rename to plugins/snmp/example-graphs/snmp__fn-sessions.png diff --git a/plugins/snmp/snmp__fn/snmp__fn-vpnsessions.png b/plugins/snmp/example-graphs/snmp__fn-vpnsessions.png similarity index 100% rename from plugins/snmp/snmp__fn/snmp__fn-vpnsessions.png rename to plugins/snmp/example-graphs/snmp__fn-vpnsessions.png diff --git a/plugins/snmp/multi_snmp_querier b/plugins/snmp/multi_snmp_querier index 0048c6af..4894ff77 100755 --- a/plugins/snmp/multi_snmp_querier +++ b/plugins/snmp/multi_snmp_querier @@ -6,8 +6,8 @@ multi_snmp_querier - Munin plugin to query several SNMP hosts =head1 SYNOPSIS -This plugin is meant to be called from Munin. You should at least set the -'hosts' environment variable from Munin's configuration (i.e. +This plugin is meant to be called from Munin. You should at least set the +'hosts' environment variable from Munin's configuration (i.e. /etc/munin/munin.conf) to specify which hosts and how to query. =head1 DESCRIPTION @@ -20,7 +20,7 @@ This plugin is meant to be called from Munin. You should at least set the Which SNMP OID should we query on; it defaults to 1.3.6.1.2.1.43.10.2.1.4.1.1 (total printed pages - of course, it only -makes sense to query a printer on this ;-) ). +makes sense to query a printer on this ;-) ). 
Other known and useful OIDs for printers are 1.3.6.1.2.1.43.11.1.1.9.1.1 (total number of pages printed with this @@ -111,7 +111,7 @@ $oid = $ENV{snmp_oid} || $defaults{oid}; total => 'Total', units => 'pages' }, - '1.3.6.1.2.1.43.11.1.1.8.1.1' => + '1.3.6.1.2.1.43.11.1.1.8.1.1' => { title => 'Total projected capacity of this cartridge', vlabel => 'Total capacity', category => 'printing', @@ -120,7 +120,7 @@ $oid = $ENV{snmp_oid} || $defaults{oid}; total => 'Total', units => 'pages' }, - '1.3.6.1.2.1.43.11.1.1.9.1.1' => + '1.3.6.1.2.1.43.11.1.1.9.1.1' => { title => 'Pages printed with this cartridge', vlabel => 'Printed pages', category => 'printing', @@ -129,7 +129,7 @@ $oid = $ENV{snmp_oid} || $defaults{oid}; total => 'Total', units => 'pages' }, - 'default' => + 'default' => { title => "Results for SNMP OID $oid", vlabel => 'units', category => 'Other', @@ -144,7 +144,7 @@ die "Hosts not set - cannot continue" unless @hosts; $cmd_arg = $ARGV[0] || ''; if($cmd_arg eq "config") { my $labels = $known_oids{$oid} || $known_oids{default}; - # See http://munin.projects.linpro.no/wiki/HowToWritePlugins for + # See http://munin.projects.linpro.no/wiki/HowToWritePlugins for # explanation on the following fields print "graph_title $labels->{title}\n"; print "graph_args --base 1000 -l 0\n"; @@ -184,7 +184,7 @@ sub ck_alive{ $ping = Net::Ping->new("tcp", 1); $ping->ping($host); } - + sub get_hosts { # Hosts are defined in the 'hosts' environment variable. It's a list of # hosts (and optionally ports) - We parse the list and arrange it neatly diff --git a/plugins/snmp/snmp__airport b/plugins/snmp/snmp__airport old mode 100644 new mode 100755 index a68188d6..b6687ddc --- a/plugins/snmp/snmp__airport +++ b/plugins/snmp/snmp__airport @@ -49,26 +49,29 @@ Please install the Python bindings for libsnmp. On Debian/Ubuntu machines this package is named 'libsnmp-python'""" sys.exit(-3) -DEBUG=None -CMDS=['type', 'rates', 'time', 'lastrefresh', 'signal', 'noise', 'rate', 'rx', - 'tx', 'rxerr', 'txerr'] -CMD=None -DESTHOST=None -NUMCLIENTS=None -NUMDHCPCLIENTS=None -WANIFINDEX=None +DEBUG = None +CMDS = ['type', 'rates', 'time', 'lastrefresh', 'signal', 'noise', 'rate', 'rx', 'tx', 'rxerr', + 'txerr'] +CMD = None +DESTHOST = None +NUMCLIENTS = None +NUMDHCPCLIENTS = None +WANIFINDEX = None + def dbg(text): """Print some debugging text if DEBUG=1 is in our environment""" if DEBUG is not None: print "DEBUG: %s" % text + def usage(): """Print some usage information about ourselves""" print __doc__ + def parseName(name): - """Examing argv[0] (i.e. the name of this script) for the hostname we should + """Examining argv[0] (i.e. the name of this script) for the hostname we should be talking to and the type of check we want to run. The hostname should be a valid, resolvable hostname, or an IP address. The command can be any of: * clients - number of connected wireless clients @@ -88,6 +91,7 @@ def parseName(name): dbg("parseName found an inconsistent name: '%s'" % name) return None + def tableToDict(table, num): """The netsnmp library returns a tuple with all of the data, it is not in any way formatted into rows. This function converts the data into a structured @@ -129,6 +133,7 @@ def tableToDict(table, num): return clientTable + def getNumClients(): """Returns the number of wireless clients connected to the Airport we are examining. This will only ever be polled via SNMP once per invocation. 
If @@ -148,6 +153,7 @@ def getNumClients(): dbg("getNumClients: found %d clients" % NUMCLIENTS) return NUMCLIENTS + def getNumDHCPClients(): """Returns the number of DHCP clients with currently active leases. This will only ever be polled via SNMP once per invocation. If called a second @@ -167,6 +173,7 @@ def getNumDHCPClients(): dbg("getNumDHCPClients: found %d clients" % NUMDHCPCLIENTS) return NUMDHCPCLIENTS + def getExternalInterface(): """Returns the index of the WAN interface of the Airport. This will only ever be polled via SNMP once per invocation, per getNum*Clients(). See @@ -176,8 +183,8 @@ def getExternalInterface(): if WANIFINDEX is None: interfaces = list(netsnmp.snmpwalk(netsnmp.Varbind(iFaceNames), - Version=2, DestHost=DESTHOST, - Community='public')) + Version=2, DestHost=DESTHOST, + Community='public')) dbg("getExternalInterface: found interfaces: %s" % interfaces) try: WANIFINDEX = interfaces.index('mgi1') + 1 @@ -189,14 +196,17 @@ def getExternalInterface(): dbg("getExternalInterface: found mgi1 at index: %d" % WANIFINDEX) return WANIFINDEX + def getExternalInOctets(): """Returns the number of octets of inbound traffic on the WAN interface""" return getOctets('In') + def getExternalOutOctets(): """Returns the number of octets of outbound traffic on the WAN interface""" return getOctets('Out') + def getOctets(direction): """Returns the number of octets of traffic on the WAN interface in the requested direction""" @@ -211,6 +221,7 @@ def getOctets(direction): Version=2, DestHost=DESTHOST, Community='public')[0]) + def getWanSpeed(): """Returns the speed of the WAN interface""" ifSpeed = "1.3.6.1.2.1.2.2.1.5.%s" % getExternalInterface() @@ -219,12 +230,13 @@ def getWanSpeed(): wanSpeed = int(netsnmp.snmpget(netsnmp.Varbind(ifSpeed), Version=2, DestHost=DESTHOST, Community='public')[0]) - except: + except: # noqa: E722 (TODO: specify the expected exceptions) dbg("getWanSpeed: Unable to probe for data, defaultint to 10000000") wanSpeed = 10000000 return wanSpeed + def getData(): """Returns a dictionary populated with all of the wireless clients and their metadata""" @@ -246,6 +258,7 @@ def getData(): return clients + def main(clients=None): """This function fetches metadata about wireless clients if needed, then displays whatever values have been requested""" @@ -263,6 +276,7 @@ def main(clients=None): for client in clients: print "MAC_%s.value %s" % (client, clients[client][CMD]) + if __name__ == '__main__': clients = None if os.getenv('DEBUG') == '1': @@ -352,4 +366,3 @@ send.min 0""" % (speed, speed) sys.exit(0) else: main(clients) - diff --git a/plugins/snmp/snmp__brocade_ifs b/plugins/snmp/snmp__brocade_ifs index a6b93b0d..b43691d2 100755 --- a/plugins/snmp/snmp__brocade_ifs +++ b/plugins/snmp/snmp__brocade_ifs @@ -1,7 +1,7 @@ #!/usr/bin/env python """ -Munin plugin which reports selected counters regarding ports on a +Munin plugin which reports selected counters regarding ports on a Brocade SAN FC-switch. Only enabled ports are considered. The counters shown: @@ -14,7 +14,7 @@ enc_out: Encoding errors outside FC frame. enc_out_per_mframe: As above, but per million frames of traffic. If there is a high number for this counter, it could reflect: - - If there is also a high value for + - If there is also a high value for rx_crcs for the port, then there is likely a GBIC/SFP problem. - If there the value of rx_crcs for the port @@ -22,7 +22,7 @@ enc_out_per_mframe: As above, but per million frames of traffic. problem. rx_crcs: CRC errors detected in received frames. 
- Together with enc_out errors, CRC errors + Together with enc_out errors, CRC errors indicate a GBIC/SFP problem. bits: Number of bits transmitted(tx)/received(rx) @@ -38,7 +38,7 @@ snmp_HOSTNAME_brocade_ifs """ # Note: In the SNMP output from brocade switches, the interesting -# counters are named with numbers starting with 1, while the +# counters are named with numbers starting with 1, while the # ports' real names on the box and in the administration interface # start with 0. And there doesn't seem to be a way to map between # ifDesc and the interesting crc and enc_out counters :-( @@ -63,7 +63,7 @@ snmp_HOSTNAME_brocade_ifs # Released according to the "New BSD License" AKA the 3-clause # BSD License: -# ==================================================================== +# ==================================================================== # Copyright (c) 2011, Danish National Board of Health. # All rights reserved. # @@ -88,7 +88,7 @@ snmp_HOSTNAME_brocade_ifs # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ==================================================================== +# ==================================================================== # $Id: brocade_san_switch_ports_ 15443 2011-03-03 12:23:56Z tra $ @@ -140,7 +140,7 @@ def debug(msg): print('Debug: %s\n' % msg) # Break OID-string in to a tuple of elements -def oidstr2tuple(oidstr): +def oidstr2tuple(oidstr): int_list = [ int(s) for s in oidstr.split('.') ] return tuple(int_list) @@ -180,7 +180,7 @@ def print_config(host_name,enabled_ports): print('multigraph %s.port_%d' % (counter_type,portnum-1)) # ARGH: numbering base stuff print('graph_title Port %d %s' % (portnum-1,counter_type)) # ARGH: numbering base stuff print('graph_args --base 1000 -l 0') - print('graph_category SAN') + print('graph_category san') print('graph_info This graph shows the count of %s' % descriptions[counter_type]) if counter_type == 'bits': @@ -204,7 +204,7 @@ def print_config(host_name,enabled_ports): print('multigraph %s' % counter_type) print('graph_title %s total %s' % (host_name,counter_type)) print('graph_args --base 1000 -l 0') - print('graph_category SAN') + print('graph_category san') print('graph_info This graph shows the total count of %s across all ports' % descriptions[counter_type]) if counter_type == 'bits': diff --git a/plugins/snmp/snmp__cpu_usage b/plugins/snmp/snmp__cpu_usage index 00aa86f2..2bb58ed6 100755 --- a/plugins/snmp/snmp__cpu_usage +++ b/plugins/snmp/snmp__cpu_usage @@ -119,7 +119,7 @@ my $cpuIdle = "1.3.6.1.4.1.2021.11.11"; my $cpu = 0; -my %cpuCounters = ( +my %cpuCounters = ( cpuUser => "1.3.6.1.4.1.2021.11.9.$cpu", # The percentage of CPU time spent processing user-level code, calculated over the last minute cpuSystem => "1.3.6.1.4.1.2021.11.10.$cpu", # The percentage of CPU time spent processing system-level code, calculated over the last minute cpuIdle => "1.3.6.1.4.1.2021.11.11.$cpu", # The percentage of processor time spent idle, calculated over the last minute @@ -147,7 +147,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") foreach my $c (keys %cpuCounters) { print $c . ".label $c\n"; print $c . ".type GAUGE\n"; - if ($firstCounter) { + if ($firstCounter) { print $c . ".draw AREA\n"; $firstCounter = 0; } else { @@ -156,7 +156,7 @@ if ($ARGV[0] and $ARGV[0] eq "config") print $c . 
".min 0\n"; } - exit 0; + exit 0; } foreach my $c (keys %cpuCounters) { diff --git a/plugins/snmp/snmp__cyberpower b/plugins/snmp/snmp__cyberpower old mode 100644 new mode 100755 index 01e5c216..f8b592e2 --- a/plugins/snmp/snmp__cyberpower +++ b/plugins/snmp/snmp__cyberpower @@ -58,7 +58,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "multigraph cyberpower_load graph_title CyberPower UPS Status graph_info This graph shows battery status information. -graph_category ups +graph_category sensors graph_vlabel % graph_args --upper-limit 100 -l 0 graph_scale no @@ -75,22 +75,22 @@ charge.type GAUGE charge.min 0 charge.max 100 "; - + print "multigraph cyberpower_runtime graph_title CyberPower UPS Runtime -graph_info This graph shows expected runtime informatiom. -graph_category ups +graph_info This graph shows expected runtime information. +graph_category sensors graph_vlabel minutes "; print "runtime.label Expected runtime runtime.draw AREA runtime.type GAUGE "; - + print "multigraph cyberpower_voltage graph_title CyberPower UPS Voltages graph_info This graph shows voltage information. -graph_category ups +graph_category sensors graph_vlabel V "; print "input.label Input voltage @@ -101,12 +101,12 @@ input.type GAUGE output.draw LINE1 output.type GAUGE "; - + if(oidExists(oid_cps_env_temp) && oidExists(oid_cps_env_humidity)){ print "multigraph cyberpower_environment graph_title CyberPower UPS Environment graph_info This graph shows environmental status information. -graph_category ups +graph_category sensors graph_vlabel F/% "; print "temp.label Temperature @@ -120,7 +120,7 @@ humidity.min 0 humidity.max 100 "; } - + exit 0; } @@ -177,7 +177,7 @@ sub oidExists { } my $oid = $_[0]; my $val = $session->get_single($oid); - + if(!length $val || $val eq 'noSuchInstance' || $val eq 'U'){ return(0); }else{ diff --git a/plugins/snmp/snmp__fn/snmp__fn b/plugins/snmp/snmp__fn similarity index 86% rename from plugins/snmp/snmp__fn/snmp__fn rename to plugins/snmp/snmp__fn index e6122725..de478b48 100755 --- a/plugins/snmp/snmp__fn/snmp__fn +++ b/plugins/snmp/snmp__fn @@ -3,11 +3,11 @@ # File: snmp__fn # Description: SNMP plugin to monitor open sessions, sslvpn, CPU and Memory on a # Fortinet Fortigate firewall. -# +# # Author: Thom Diener # License: This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; version 2 dated +# as published by the Free Software Foundation; version 2 dated # June, 1991. # # Version: v1.00 30.10.2011 First draft of the fortigate plugin @@ -22,21 +22,21 @@ # (Example: ln -s /usr/share/munin/plugins/snmp__fn \ # /etc/munin/plugins/snmp_foo.example.com_fn) # -# Add global community string +# Add global community string # vi /etc/munin/plugin-conf.d/munin-node # [snmp_*] # env.community private # timeout 45 # In case low latency or timeout # -# Fortigate Activate snmp on your Fortigate firewall. -# Fortigate documentation at https://support.fortinet.com +# Fortigate Activate snmp on your Fortigate firewall. 
+# Fortigate documentation at https://support.fortinet.com # # MIB Download and copy the original Fortigate MIB definition files to: # /usr/share/snmp/mibs/FORTINET-CORE-MIB.mib.txt # /usr/share/snmp/mibs/FORTINET-FORTIGATE-MIB.mib # -# Testing This plugin has been tested with the following OS/software: -# +# Testing This plugin has been tested with the following OS/software: +# # Appliance/Firmware: # Fortigate-50B 3.00-b0662(MR6 Patch 1) work with v1.00-1.02 # Fortigate-50B 3.00-b0678(MR6 Patch 6) work with v1.00-1.02 @@ -44,10 +44,10 @@ # Fortigate-50B 4.00-b0217(MR1 Patch 10) work with v1.00-1.02 # Fortigate-50B 4.00-b0217(MR2 Patch 4) work with v1.00-1.02 # Fortigate-50B 4.00-b0521(MR3 Patch 6) work with v1.03 -# +# # Munin-Version: -# Munin 1.4.4 (1.4.4-1ubuntu1) -# OS-Version: +# Munin 1.4.4 (1.4.4-1ubuntu1) +# OS-Version: # Ubuntu 10.04.3 LTS (lucid) x86_32/64 # #%# family=manual @@ -87,26 +87,16 @@ ATUN=`$SNMPGET $fnVPNSslStatsActiveTunnels | cut -d ":" -f4 | cut -d " " -f2` autoconf() { - if [ $SCPU ]; then - echo yes, OID $FGTcpu can be readed. + if [ -z "$SCPU" ]; then + echo "no (one or multiple OID can not be read)" + elif [ -z "$SMEM" ]; then + echo "no (one or multiple OID can not be read)" + elif [ -z "$SCNT" ]; then + echo "no (one or multiple OID can not be read)" else - echo no, one or multiple OID can not be readed. - exit 1 + echo "yes" fi - - if [ $SMEM ]; then - echo yes, OID $fnSysMemUsage can be readed. - else - echo no, one or multiple OID can not be readed. - exit 1 - fi - if [ $SCNT ]; then - echo yes, OID $fnSysSesCount can be readed. - else - echo no, one or multiple OID can not be read. - exit 1 - fi -exit 0 + exit 0 } config() @@ -148,9 +138,9 @@ config() echo "graph_title $UNIT - SSLvpn Sessions" echo 'graph_category fw' echo 'graph_vlabel Sessions/Users' - echo 'graph_info Loged in users with SSLvpn (WebSession or Tunnel-Mode)' + echo 'graph_info Logged in users with SSLvpn (WebSession or Tunnel-Mode)' echo 'fortiuser.label Users' - echo 'fortiuser.info Loged in SSLvpn users' + echo 'fortiuser.info Logged in SSLvpn users' echo 'fortiwebs.label WebSessions' echo 'fortiwebs.info Active SSLvpn WebSessions' echo 'fortiatun.label ActiveTunnels' diff --git a/plugins/snmp/snmp__if_combined b/plugins/snmp/snmp__if_combined index ddb52219..ce045461 100755 --- a/plugins/snmp/snmp__if_combined +++ b/plugins/snmp/snmp__if_combined @@ -95,7 +95,7 @@ Should support indexing by Pulling in a user definable set of ifName/ifDescr/ifAlias for textual description and even graph_title would also be nice. -IFF we get a patch to support the .oldname attribute then we may use +If we get a patch to support the .oldname attribute then we may use that to let the operator change the indexing dynamically without data loss. 
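One pattern recurs throughout this changeset (sge_queue_, sge_queue_xml_, smstools_, the snmp__fn autoconf rewrite above, and the Solaris plugins further down): autoconf now always exits 0 and answers "yes" or "no (reason)" on stdout, leaving a non-zero exit status to signal a genuine plugin failure rather than a negative answer. A minimal sketch of the idiom in plain sh, with REQUIRED_CMD standing in for whatever binary a particular plugin depends on:

    #!/bin/sh
    # Sketch of the autoconf convention applied across this changeset.
    # REQUIRED_CMD is a placeholder, not the dependency of any one plugin.
    REQUIRED_CMD=some_tool

    if [ "$1" = "autoconf" ]; then
        if command -v "$REQUIRED_CMD" >/dev/null 2>&1; then
            echo "yes"
        else
            echo "no ($REQUIRED_CMD not found)"
        fi
        exit 0   # "no" is a valid autoconf answer, not an error
    fi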
diff --git a/plugins/snmp/snmp__webthermometer b/plugins/snmp/snmp__webthermometer index c81ac19f..b0b4f51b 100755 --- a/plugins/snmp/snmp__webthermometer +++ b/plugins/snmp/snmp__webthermometer @@ -59,7 +59,7 @@ if [ "$1" = "config" ]; then # some fix values echo "host_name $SNMPCLIENT" - echo 'graph_category Other' + echo 'graph_category sensors' echo 'graph_args --base 1000 -l 0' # some variables, fetched from the device @@ -72,7 +72,7 @@ if [ "$1" = "config" ]; then SENSOR_2_CRITICAL=`$SNMPGET WebGraph-2xThermometer-MIB::wtWebioAn2GraphAlarmMax.2 | sed s/.*STRING:// | sed s/\"//g` # echo the result to munin - echo "graph_title $GRAPH_TITLE" + echo "graph_title $GRAPH_TITLE" echo "graph_info $GRAPH_INFO" echo "graph_vlabel $GRAPH_VLABEL" echo "Sensor_1.label $SENSOR_1_LABEL" diff --git a/plugins/snmp/snmp__wmsconnectedplayers b/plugins/snmp/snmp__wmsconnectedplayers index 506f31f4..2c3f9a1a 100755 --- a/plugins/snmp/snmp__wmsconnectedplayers +++ b/plugins/snmp/snmp__wmsconnectedplayers @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -86,7 +86,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsConnectedPlayers -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsConnectedPlayers graph_category streaming graph_info This graph shows wmsConnectedPlayers. diff --git a/plugins/snmp/snmp__wmsplayerallocatedbandwidth b/plugins/snmp/snmp__wmsplayerallocatedbandwidth index a29cc123..8f5209ee 100755 --- a/plugins/snmp/snmp__wmsplayerallocatedbandwidth +++ b/plugins/snmp/snmp__wmsplayerallocatedbandwidth @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -92,7 +92,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsPlayerAllocatedBandwidth -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsPlayerAllocatedBandwidth graph_category streaming graph_info This graph shows wmsPlayerAllocatedBandwidth. diff --git a/plugins/snmp/snmp__wmsstreaminghttpplayers b/plugins/snmp/snmp__wmsstreaminghttpplayers index c3a8492e..3118478c 100755 --- a/plugins/snmp/snmp__wmsstreaminghttpplayers +++ b/plugins/snmp/snmp__wmsstreaminghttpplayers @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -87,7 +87,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsStreamingHttpPlayers -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsStreamingHttpPlayers graph_category streaming graph_info This graph shows wmsStreamingHttpPlayers. diff --git a/plugins/snmp/snmp__wmsstreamingmmsplayers b/plugins/snmp/snmp__wmsstreamingmmsplayers index 2608e731..29f8e258 100755 --- a/plugins/snmp/snmp__wmsstreamingmmsplayers +++ b/plugins/snmp/snmp__wmsstreamingmmsplayers @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -87,7 +87,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsStreamingMmsPlayers -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsStreamingMmsPlayers graph_category streaming graph_info This graph shows wmsStreamingMmsPlayers. diff --git a/plugins/snmp/snmp__wmsstreamingplayers b/plugins/snmp/snmp__wmsstreamingplayers index 7f93b605..2cf4c8c6 100755 --- a/plugins/snmp/snmp__wmsstreamingplayers +++ b/plugins/snmp/snmp__wmsstreamingplayers @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -84,7 +84,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsStreamingPlayers -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsStreamingPlayers graph_category streaming graph_info This graph shows wmsStreamingPlayers. diff --git a/plugins/snmp/snmp__wmsstreamingrtspplayers b/plugins/snmp/snmp__wmsstreamingrtspplayers index 333a8283..300b24cb 100755 --- a/plugins/snmp/snmp__wmsstreamingrtspplayers +++ b/plugins/snmp/snmp__wmsstreamingrtspplayers @@ -16,13 +16,13 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# -# Derived from SNMP plugin code written by +# Derived from SNMP plugin code written by # Jimmy Olsen, Dagfinn Ilmari Mannsaaker # ####################################################################### # # You must enable SNMP for Windows Media Services by running: -# +# # regsvr32 "%systemroot%\system32\windows media\server\wmssnmp.dll # ####################################################################### @@ -86,7 +86,7 @@ if (defined $ARGV[0] and $ARGV[0] eq "config") { print "host_name $host\n"; print "graph_title wmsStreamingRtspPlayers -graph_args --base 1000 -l 0 +graph_args --base 1000 -l 0 graph_vlabel wmsStreamingRtspPlayers graph_category streaming graph_info This graph shows wmsStreamingRtspPlayers. diff --git a/plugins/snmp/snmp_room_alert_ b/plugins/snmp/snmp_room_alert_ index 249c6c12..a1822add 100755 --- a/plugins/snmp/snmp_room_alert_ +++ b/plugins/snmp/snmp_room_alert_ @@ -1,6 +1,4 @@ -#!/usr/bin/ruby -# encoding: utf-8 - +#!/usr/bin/env ruby # Plugin to monitor Room Alert 11E environmental units. # Requires ruby and the ruby SNMP library. # @@ -34,31 +32,31 @@ require 'snmp' -base_oid = "enterprises.20916.1.3.1" +base_oid = 'enterprises.20916.1.3.1' case $0.match('[^_]+$')[0] -when "temperature" +when 'temperature' subchannel = 1 - name = "temperature" - label = "°C" - letter = "t" -when "humidity" + name = 'temperature' + label = '°C' + letter = 't' +when 'humidity' subchannel = 3 - name = "humidity" - label = "% Relative Humidity" - letter = "h" + name = 'humidity' + label = '% Relative Humidity' + letter = 'h' else exit 1 end def is_vb_valid(vb, subchannel) - return (vb.name[-1] == 0 and vb.name[-2] == subchannel and vb.value > 1) + (vb.name[-1] == 0 and vb.name[-2] == subchannel and vb.value > 1) end def field_name(unit, vb, letter) clean_unit = unit.gsub(/[.-]/, '_') sensor = vb.name[-3].to_s - return "#{clean_unit}_#{letter}#{sensor}" + "#{clean_unit}_#{letter}#{sensor}" end def label(unit, vb) @@ -67,28 +65,25 @@ def label(unit, vb) label = "#{unit} " + (ENV["label_#{clean_unit}_#{sensor}"] || "sensor #{sensor}") end -units = (ENV['units'] || "").split(/\s+/) -community = ENV['community'] || "public" +units = (ENV['units'] || '').split(/\s+/) +community = ENV['community'] || 'public' case ARGV[0] -when "autoconf" - puts "no" - exit 1 -when "config" +when 'autoconf' + puts 'no' + exit 0 +when 'config' puts "graph_title Room Alert 11E units (#{name} probes)" puts "graph_vlabel #{label}" - puts "graph_category sensors" - if name == "humidity" - puts "graph_args --lower-limit 0 --upper-limit 100" - end + puts 'graph_category sensors' + puts 'graph_args --lower-limit 0 --upper-limit 100' if name == 'humidity' units.each do |unit| - SNMP::Manager.open(:Host => unit, - :Community => community, - :Version => :SNMPv1) do |manager| + SNMP::Manager.open(Host: unit, + Community: community, + Version: :SNMPv1) do |manager| manager.walk(base_oid) do |vb| - if not is_vb_valid(vb, subchannel) - next - end + next unless is_vb_valid(vb, subchannel) + puts "#{field_name(unit, vb, letter)}.label #{label(unit, vb)}" end end @@ -96,15 +91,13 @@ when "config" exit 0 end - units.each do |unit| - SNMP::Manager.open(:Host => unit, - :Community => community, - :Version => :SNMPv1) do |manager| + SNMP::Manager.open(Host: unit, + Community: community, + Version: :SNMPv1) do |manager| manager.walk(base_oid) do |vb| - if not is_vb_valid(vb, subchannel) - next - end + next unless is_vb_valid(vb, subchannel) + puts "#{field_name(unit, vb, letter)}.value #{vb.value.to_f / 100}" end end diff 
--git a/plugins/solar/example-graphs/fronius-week.png b/plugins/solar/example-graphs/fronius-week.png new file mode 100644 index 00000000..1c56e079 Binary files /dev/null and b/plugins/solar/example-graphs/fronius-week.png differ diff --git a/plugins/solar/fronius b/plugins/solar/fronius new file mode 100755 index 00000000..c31cacce --- /dev/null +++ b/plugins/solar/fronius @@ -0,0 +1,288 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 NAME + +fronius - Plugin to monitor Fronius Solar inverter using the JSON Solar API. + +The Solar API reports both an immediate power output reading at +time-of-request, and an incremental sum of daily and yearly produced energy. +This plugin uses the yearly energy sum as a DERIVE value, and calculates the +average power output during the last measurement interval. This will likely be +lower than the immediate reading, but the aggregation in weekly/monthly/yearly +graphs will be more correct. The immediate power output is output as extra +information. + +=head1 CONFIGURATION + + [fronius] + env.inverter_base_url http://fronius # this is the default + env.host_name solar_inverter # optional, host name to report data as in munin + env.connect_timeout 1 # optional, amount to wait for requests, in seconds + +=head1 CACHING + +As the inverter may go to sleep at night, the initial service information is cached +locally, with a twelve-hour lifetime, before hitting the Solar API again. However, +if hitting the API to refresh the cache fails, the stale cache is used anyway, +to have a better chance of getting the config data out nonetheless. + +=head1 CAVEAT + +Only tested on a Fronius Primo. + +=head1 AUTHOR + +Olivier Mehani + +Copyright (C) 2020 Olivier Mehani + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut + +# Example outputs +# +## http://fronius/solar_api/v1/GetInverterInfo.cgi +#GetInverterInfo=' +#{ +# "Body" : { +# "Data" : { +# "1" : { +# "CustomName" : "Primo 5.0-1 (1)", +# "DT" : 76, +# "ErrorCode" : 0, +# "PVPower" : 5200, +# "Show" : 1, +# "StatusCode" : 7, +# "UniqueID" : "1098861" +# } +# } +# }, +# "Head" : { +# "RequestArguments" : {}, +# "Status" : { +# "Code" : 0, +# "Reason" : "", +# "UserMessage" : "" +# }, +# "Timestamp" : "2020-06-11T10:55:23+10:00" +# } +#} +#' +# +## http://fronius/solar_api/v1/GetPowerFlowRealtimeData.fcgi +#GetPowerFlowRealtimeData=' +#{ +# "Body" : { +# "Data" : { +# "Inverters" : { +# "1" : { +# "DT" : 76, +# "E_Day" : 1201, +# "E_Total" : 1201, +# "E_Year" : 1201.4000244140625, +# "P" : 2521 +# } +# }, +# "Site" : { +# "E_Day" : 1201, +# "E_Total" : 1201, +# "E_Year" : 1201.4000244140625, +# "Meter_Location" : "unknown", +# "Mode" : "produce-only", +# "P_Akku" : null, +# "P_Grid" : null, +# "P_Load" : null, +# "P_PV" : 2521, +# "rel_Autonomy" : null, +# "rel_SelfConsumption" : null +# }, +# "Version" : "12" +# } +# }, +# "Head" : { +# "RequestArguments" : {}, +# "Status" : { +# "Code" : 0, +# "Reason" : "", +# "UserMessage" : "" +# }, +# "Timestamp" : "2020-06-11T10:55:21+10:00" +# } +#} +#' +# +## http://fronius/solar_api/v1/GetActiveDeviceInfo.cgi?DeviceClass=SensorCard +#GetActiveDeviceInfo=' +#{ +# "Body" : { +# "Data" : {} +# }, +# "Head" : { +# "RequestArguments" : { +# "DeviceClass" : "SensorCard" +# }, +# "Status" : { +# "Code" : 0, +# "Reason" : "", +# "UserMessage" : "" +# }, +# "Timestamp" : "2020-06-11T10:55:24+10:00" +# } +#} +#' +# +## http://fronius/solar_api/v1/GetLoggerConnectionInfo.cgi +#GetLoggerConnectionInfo=' +#{ +# "Body" : 
{ +# "Data" : { +# "SolarNetConnectionState" : 2, +# "SolarWebConnectionState" : 2, +# "WLANConnectionState" : 2 +# } +# }, +# "Head" : { +# "RequestArguments" : {}, +# "Status" : { +# "Code" : 0, +# "Reason" : "", +# "UserMessage" : "" +# }, +# "Timestamp" : "2020-06-11T10:55:25+10:00" +# } +#} +#' + +set -eu + +# shellcheck disable=SC1090 +. "${MUNIN_LIBDIR}/plugins/plugin.sh" + +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x +fi + +INVERTER_BASE_URL=${inverter_base_url:-http://fronius} +HOST_NAME=${host_name:-} +CONNECT_TIMEOUT=${connect_timeout:-1} + +check_deps() { + for CMD in curl jq recode; do + if ! command -v "${CMD}" >/dev/null; then + echo "no (${CMD} not found)" + fi + done +} + +CURL_ARGS="-s --connect-timeout ${CONNECT_TIMEOUT}" +fetch() { + # shellcheck disable=SC2086 + curl -f ${CURL_ARGS} "$@" \ + || { echo "error fetching ${*}" >&2; false; } +} + +get_inverter_info() { + fetch "${INVERTER_BASE_URL}/solar_api/v1/GetInverterInfo.cgi" \ + | recode html..ascii +} + +get_power_flow_realtime_data() { + fetch "${INVERTER_BASE_URL}/solar_api/v1/GetPowerFlowRealtimeData.fcgi" + #echo "${GetPowerFlowRealtimeData} +} + +# Run the command and arguments passed as arguments to this method, and cache +# the response. The first argument is a timeout (in minutes) after which the +# cache is ignored and a new request is attempted. If the request fails, the +# cache is used. If the timeout is 0, the request is always attempted, using +# the cache as a backup on failure. +cached() { + timeout="${1}" + shift + fn="${1}" + shift + # shellcheck disable=SC2124 + args="${@}" + + # shellcheck disable=SC2039 + api_data='' + # shellcheck disable=SC2039 + cachefile="${MUNIN_PLUGSTATE}/$(basename "${0}").${fn}.cache.json" + if [ -n "$(find "${cachefile}" -mmin "-${timeout}" 2>/dev/null)" ]; then + api_data=$(cat "${cachefile}") + else + # shellcheck disable=SC2086 + api_data="$("${fn}" ${args} \ + || true)" + + if [ -n "${api_data}" ]; then + echo "${api_data}" > "${cachefile}" + else + api_data=$(cat "${cachefile}") + fi + fi + echo "${api_data}" +} + +config() { + if test -n "${HOST_NAME}"; then + echo "host_name ${HOST_NAME}" + fi + # graph_period is not a shell variable + cat <<'EOF' +graph_title Solar Inverter Output +graph_info Power generated from solar inverters +graph_total Total output +graph_category sensors +graph_vlabel Average output [W] +graph_args -l 0 --base 1000 +EOF +cached 720 get_inverter_info | jq -r '.Body.Data + | to_entries[] + | @text " +inverter\(.key).label \(.value.CustomName) +inverter\(.key).info Power generated by the solar array (total size \(.value.PVPower / 1000) kW) connected to inverter \(.value.CustomName) (ID: \(.value.UniqueID)) +inverter\(.key).cdef inverter\(.key),3600,* +inverter\(.key).type DERIVE +inverter\(.key).min 0 +inverter\(.key).max \(.value.PVPower) +inverter\(.key).draw AREASTACK +"' +} + +get_data() { +cached 0 get_power_flow_realtime_data | jq -r '.Body.Data.Inverters + | to_entries[] + | @text " +inverter\(.key).value \(.value.E_Year | round) +inverter\(.key).extinfo Immediate output: \(.value.P) W; Daily total: \(.value.E_Day | round) Wh; Yearly total: \(.value.E_Year / 1000 | round) kWh +"' +} + +main () { + check_deps + + case ${1:-} in + config) + config + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then + get_data + fi + ;; + *) + get_data + ;; + esac +} + +main "${1:-}" diff --git a/plugins/solaris/forks b/plugins/solaris/forks index 5edf0a81..97874d4b 100755 --- a/plugins/solaris/forks +++ b/plugins/solaris/forks @@ -66,7 +66,7 @@ 
config() { echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d' # print data source attributes - # split line into field,type,draw,label + # split line into field,type,draw,label local field type draw label echo "$data_attr" | while read -r field type draw label do @@ -98,7 +98,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/solaris/fsstat_act_ b/plugins/solaris/fsstat_act_ index c9113586..533d562d 100755 --- a/plugins/solaris/fsstat_act_ +++ b/plugins/solaris/fsstat_act_ @@ -12,9 +12,9 @@ Tested with Solaris 10 and 11. And should work with Solaris 10 11/06 or above. Note: - In Solaris 11, fsstat command can get stats for each non-global zones in + In Solaris 11, fsstat command can get stats for each non-global zones in global zone. (see man fsstat) - In global zone, this plugin gets stats of only global zone. + In global zone, this plugin gets stats of only global zone. In non-global zones, this plugin reports stats of the non-global zones. =head1 CONFIGURATION @@ -62,11 +62,11 @@ stat_regexp='/^(?!(class|crtime|snaptime|.*_bytes))/' # Graph settings global_attr=" - graph_title File system statictics - Activities of $( echo "$fs_type" | tr '[:lower:]' '[:upper:]' ) - graph_category disk + graph_title File system statistics - Activities of $( echo "$fs_type" | tr '[:lower:]' '[:upper:]' ) + graph_category disk graph_args --base 1000 graph_vlabel Counts per second - graph_info File system statictics - Activities of $( echo "$fs_type" | tr '[:lower:]' '[:upper:]' ) + graph_info File system statistics - Activities of $( echo "$fs_type" | tr '[:lower:]' '[:upper:]' ) " # Functions @@ -78,7 +78,7 @@ get_zone_id() { zoneid=0 osver=$( uname -r | cut -d. -f2 ) - + if [ "$osver" -ge 11 ]; then zonename=$( zonename ) zoneid=$( /usr/sbin/zoneadm list -p | awk -F: '$2 == "'"$zonename"'" { print $1 }' ) @@ -101,7 +101,7 @@ suggest() { kstat -p "unix:${zone_id}:${name_regexp}:/^(read_bytes|write_bytes)\$/" \ | sed -e 's/vopstats_//' -e 's/:/ /g' \ | awk '{ - sum[ $3 ] += $5 + sum[ $3 ] += $5 } END { for ( i in sum ) { @@ -163,7 +163,7 @@ suggest) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/solaris/fsstat_bytes b/plugins/solaris/fsstat_bytes index c95bfdfc..49717a22 100755 --- a/plugins/solaris/fsstat_bytes +++ b/plugins/solaris/fsstat_bytes @@ -9,9 +9,9 @@ Tested with Solaris 10 and 11. Note: - In Solaris 11, fsstat command can get stats for each non-global zones in + In Solaris 11, fsstat command can get stats for each non-global zones in global zone. (see man fsstat) - In global zone, this plugin gets stats of only global zone. + In global zone, this plugin gets stats of only global zone. In non-global zones, this plugin reports stats of the non-global zones. =head1 CONFIGURATION @@ -22,7 +22,7 @@ =head1 ENVIRONMENT VARIABLES - env.exclude - file system(s) to exclude seperated by white-space. + env.exclude - file system(s) to exclude separated by white-space. 
example: env.exclude autofs default: none @@ -53,11 +53,11 @@ name_regexp='/^vopstats_(?![0-9a-f]{7})[a-z]/' # data source of fsstat # Set graph settings global_attr=" - graph_title File system statictics - I/O throughput + graph_title File system statistics - I/O throughput graph_category disk graph_args --base 1024 graph_vlabel Bytes per second write (-) / read (+) - graph_info File system statictics - I/O throughput + graph_info File system statistics - I/O throughput " data_in=read_bytes data_out=write_bytes @@ -86,7 +86,7 @@ get_zone_id() { zoneid=0 osver=$( uname -r | cut -d. -f2 ) - + if [ "$osver" -ge 11 ]; then zonename=$( zonename ) zoneid=$( /usr/sbin/zoneadm list -p | awk -F: '$2 == "'"$zonename"'" { print $1 }' ) @@ -160,7 +160,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/solaris/interrupts b/plugins/solaris/interrupts index d421004e..50169246 100755 --- a/plugins/solaris/interrupts +++ b/plugins/solaris/interrupts @@ -78,7 +78,7 @@ config() { echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d' # print data source attributes - # split line into field,type,draw,label + # split line into field,type,draw,label local field type draw label echo "$data_attr" | while read -r field type draw label do @@ -118,7 +118,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/solaris/io_disk b/plugins/solaris/io_disk index 3701d16b..08c14e05 100755 --- a/plugins/solaris/io_disk +++ b/plugins/solaris/io_disk @@ -8,7 +8,7 @@ These functions are implemented: ops : similar to iostat r/s, w/s - bytes : similar to iostat kr/s, kw/s + bytes : similar to iostat kr/s, kw/s busy : similar to iostat %b, %w (%w usually indicates 0 in Sol10,11) queue : similar to iostat actv, wait latency : similar to iostat asvc_t, wsvc_t @@ -25,11 +25,11 @@ cd /path/to/munin/etc/plugins ln -s /path/to/munin/lib/plugins/io_disk . - The RRD files generated by io_busy_, io_ops_, io_bytes_ can be taken over + The RRD files generated by io_busy_, io_ops_, io_bytes_ can be taken over by this plugin. Thus, please remove symlinks of those plugins before using this plugin. - By default, this plugin monitors disk devices. And also it can monitor + By default, this plugin monitors disk devices. And also it can monitor NFS and Tape devices as much as io_* plugins with setting environments. Note that instance names of nfs (e.g. nfs1) can be changed after reboot or @@ -41,7 +41,7 @@ example: env.class /disk|nfs|tape/ default: disk - env.module - Module name. Only used in internal graph name. + env.module - Module name. Only used in internal graph name. example: env.module something default: sd @@ -60,7 +60,7 @@ example: env.title_type Disk Device, NFS, Tape default: Disk Device - env.exclude - Device instance name(s) to exclude seperated by white-space. + env.exclude - Device instance name(s) to exclude separated by white-space. example: env.exclude sd0 ssd1 default: none @@ -102,7 +102,7 @@ functions='ops bytes busy queue latency size' : "${graph_width:=}" # Create map of instance name (e.g. sd0) and logical device name (e.g. c0t0d0) -# Example: +# Example: # name_sd1=c0t0d0 # name_ssd2=c0tAB_1234d0 (shorten long target) # ... 
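The Solaris plugins touched here (forks, fsstat_act_, fsstat_bytes, interrupts, io_disk) and the new fronius plugin above converge on the same dispatch idiom for dirtyconfig: data values are printed together with the config output only when munin-node advertises the capability, and the variable now defaults to 0 when unset so the test is always well-formed. A condensed sketch of that dispatch block, assuming config() and fetch() are defined as in those plugins:

    # Sketch only; config() and fetch() are assumed to exist.
    case ${1:-} in
    autoconf)
        echo "yes"
        ;;
    config)
        config
        # Emit values with the config only if munin-node supports dirtyconfig.
        if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
        ;;
    *)
        fetch
        ;;
    esac
    exit 0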
@@ -213,7 +213,7 @@ do_config() { latency|size) for stat in reads writes do - echo "${dev}_${stat}.label dummy" + echo "${dev}_${stat}.label dummy" echo "${dev}_${stat}.graph no" echo "${dev}_${stat}.type DERIVE" echo "${dev}_${stat}.min 0" @@ -243,7 +243,7 @@ do_config() { esac # Print data attributes - echo "${dev}_${conf_out}.label dummy" + echo "${dev}_${conf_out}.label dummy" echo "${dev}_${conf_out}.graph no" echo "${dev}_${conf_out}.type DERIVE" echo "${dev}_${conf_out}.min 0" @@ -334,7 +334,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/solaris/solaris-memstat b/plugins/solaris/solaris-memstat index 13e30d1c..64e3e39c 100755 --- a/plugins/solaris/solaris-memstat +++ b/plugins/solaris/solaris-memstat @@ -41,11 +41,10 @@ if [ "$1" = "autoconf" ]; then if [ -e /usr/bin/mdb ]; then echo yes - exit 0 else echo /usr/bin/mdb not found - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -89,7 +88,7 @@ fi echo "::memstat" | mdb -k | nawk ' BEGIN { - pagesize='$(getconf PAGESIZE)' + pagesize='"$(getconf PAGESIZE)"' kernel=0 zfs=0 anon=0 @@ -98,11 +97,11 @@ BEGIN { phys=0 } -/^Kernel/ { kernel=$2 } +/^Kernel/ { kernel=$2 } /^ZFS File Data/ { zfs=$4 } -/^Anon/ { anon=$2 } +/^Anon/ { anon=$2 } /^Exec and libs/ { exec=$4 } -/^Page cache/ { cacheused=$3 } +/^Page cache/ { cacheused=$3 } /^Free \(cachelist\)/ { cachefree=$3 } /^Free \(freelist\)/ { free=$3 } diff --git a/plugins/solaris/zones_cpu b/plugins/solaris/zones_cpu index 003d6d1a..5fb58f45 100755 --- a/plugins/solaris/zones_cpu +++ b/plugins/solaris/zones_cpu @@ -12,13 +12,12 @@ if [ "$1" = 'autoconf' ]; then if [ $zones -gt 1 ]; then echo yes else - echo yes + echo no fi - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = 'config' ]; then diff --git a/plugins/solaris/zones_mem b/plugins/solaris/zones_mem index d492af05..a25cb830 100755 --- a/plugins/solaris/zones_mem +++ b/plugins/solaris/zones_mem @@ -14,13 +14,12 @@ if [ "$1" = 'autoconf' ]; then if [ $zones -gt 1 ]; then echo yes else - echo yes + echo no fi - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = 'config' ]; then diff --git a/plugins/solr/solr b/plugins/solr/solr deleted file mode 100755 index 647ea5e9..00000000 --- a/plugins/solr/solr +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -import sys, os -import urllib2 -try: - from xml.etree import cElementTree as ET -except ImportError: - try: - import cElementTree as ET - except ImportError: - sys.exit(1) - -SOLR_PORT=8389 -SOLR_HOST="localhost" -# Parameters: -# -# config (required) -# autoconf (optional - used by munin-config) -# -# Magic markers (Used by munin-config and some installation scripts. 
-# Optional): -# author: Tarjei Huse (tarjei - at - kraken.no) -# -#%# family=auto -#%# capabilities=autoconf - -def parseArgs(): - "Parses the name of the file " - parts = sys.argv[0].split("_") - params = { - 'cores' : [core1], - 'valueName' : "avgTimePerRequest" - } - if len(parts) == 1: - return params - - params['valueName'] = parts[1] - if len(parts) < 3: - return params - params['cores'] = parts[2:] - return params - -def printGraphdef(name, label, info, _min, _max, _type): - - print "%s.label %s" % (name, label) -# print "%s.info %s" % (name, info) - print "%s.max %d" % (name, _max) - print "%s.min %d" % (name, _min) - print "%s.type %s" % (name, _type) -def fetchUrl(core): - if ("URL" in os.environ): - URL=os.environ['URL'] - else: - URL="http://%s:%d/%s/admin/stats.jsp" - - response = urllib2.urlopen(URL % (SOLR_HOST, SOLR_PORT, core)) - return parse(response) - -def parse(response): - root = ET.parse(response) - queues = root.findall("/solr-info/QUERYHANDLER/entry") - return queues - -def fetchFile(): - f = open("/tmp/stats.jsp.html") - return parse(f) - -def getEntry(entries, entryName): - for entry in entries: - name = entry.findtext("name").strip() - if (name != entryName): - continue - return entry.find("stats") - -def getValue(entry, valueName): - for stat in entry: - if stat.get('name') == valueName: - return stat.text - #print "Could not find %s for entry" % valueName - return 0 - -if len(sys.argv) > 1: - if sys.argv[1]== "autoconf": - # check connection - sys.exit(0) - elif sys.argv[1] == "config": - params = parseArgs() - - print 'graph_title Solr %s' % params['valueName'] - print 'graph_args -l 0 ' - print 'graph_vlabel Size %s' % params['valueName'] - print 'graph_category search' - print 'graph_info Info for cores: %s' % ( ",".join(params['cores'])) - - for core in params['cores']: - #print core, params['valueName'] - print "%s-%s.label %s-%s" % (core, params['valueName'], params['valueName'], core) - print "%s-%s.type GAUGE" % (core, params['valueName']) - print "%s-%s.min 0" % (core, params['valueName']) - sys.exit(0) - - -params = parseArgs() -for core in params['cores']: - #print core, params['valueName'] - queues = fetchUrl(core) - searcher = getEntry(queues, "org.apache.solr.handler.StandardRequestHandler") - value = getValue(searcher, params['valueName']).strip() - print "%s-%s.value %s" % (core, params['valueName'], value) - - -sys.exit(0) diff --git a/plugins/solr/solr-stats b/plugins/solr/solr-stats index e023d339..424d9e39 100755 --- a/plugins/solr/solr-stats +++ b/plugins/solr/solr-stats @@ -9,7 +9,7 @@ * Verify the server where the munin-node instance is can access that URL * * You need to have a PHP 5.2.6+ CLI installed too - * + * * Once the plugin is available you can simlink it with the following naming convention : * solr-[name of the core]-[name of the stats section - ex.: CORE]-[name of the entry in the xml - ex.: searcher]-[name of the stat to graph - ex.: numDocs] */ diff --git a/plugins/solr/solr4_ b/plugins/solr/solr4_ index 247c0810..b41c3741 100755 --- a/plugins/solr/solr4_ +++ b/plugins/solr/solr4_ @@ -1,56 +1,69 @@ #!/usr/bin/env python -# -# Copyright (c) 2013, Antonio Verni, me.verni@gmail.com -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, 
and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -# Solr 4.* munin graph plugin -# Project repo: https://github.com/averni/munin-solr -# -# Plugin configuration parameters: -# -# [solr_*] -# env.host_port -# env.url -# env.qpshandler_ -# -# Example: -# [solr_*] -# env.host_port solrhost:8080 -# env.url /solr -# env.qpshandler_select /select -# -# Install plugins: -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_numdocs_core_1 -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_requesttimes_select -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_qps -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_qps_core_1_select -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_indexsize -# ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_memory -# -# +""" +=head1 NAME + +Solr 4.* munin graph plugin + + +=head1 CONFIGURATION + +Plugin configuration parameters: + +[solr_*] + env.host_port + env.url + env.qpshandler_ + +Example: +[solr_*] + env.host_port solrhost:8080 + env.url /solr + env.qpshandler_select /select + +Install plugins: + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_numdocs_core_1 + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_requesttimes_select + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_qps + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_qps_core_1_select + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_indexsize + ln -s /usr/share/munin/plugins/solr_.py /etc/munin/plugins/solr_memory + + +=head1 AUTHOR + +Copyright (c) 2013, Antonio Verni, me.verni@gmail.com + + +=head1 LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ +Project repo: https://github.com/averni/munin-solr + +""" -import sys -import os -import httplib import json +import httplib +import os +import sys + def parse_params(): plugname = os.path.basename(sys.argv[0]).split('_', 2)[1:] @@ -60,29 +73,31 @@ def parse_params(): 'core': plugname[1] if len(plugname) > 1 else '', 'params': {} } - if plugname[0] in[ 'qps', 'requesttimes']: + if plugname[0] in['qps', 'requesttimes']: data = params['core'].rsplit('_', 1) handler = data.pop() params['params'] = { - 'handler': os.environ.get('qpshandler_%s' % handler, 'standard') + 'handler': os.environ.get('qpshandler_%s' % handler, 'standard'), } if not data: params['core'] = '' else: params['core'] = data[0] - elif plugname[0] == 'indexsize': + elif plugname[0] == 'indexsize': params['params']['core'] = params['core'] return params + ############################################################################# # Datasources class CheckException(Exception): pass + class JSONReader: @classmethod - def readValue(cls, struct, path, convert = None): + def readValue(cls, struct, path, convert=None): if not path[0] in struct: return -1 obj = struct[path[0]] @@ -94,6 +109,7 @@ class JSONReader: return convert(obj) return obj + class SolrCoresAdmin: def __init__(self, host, solrurl): self.host = host @@ -107,7 +123,8 @@ class SolrCoresAdmin: res = conn.getresponse() data = res.read() if res.status != 200: - raise CheckException("Cores status fetch failed: %s\n%s" %( str(res.status), res.read())) + raise CheckException("Cores status fetch failed: %s\n%s" + % (str(res.status), res.read())) self.data = json.loads(data) def getCores(self): @@ -116,7 +133,7 @@ class SolrCoresAdmin: cores = JSONReader.readValue(self.data, ['status']) return cores.keys() - def indexsize(self, core = None): + def indexsize(self, core=None): if not self.data: self.fetchcores() if core: @@ -126,9 +143,11 @@ class SolrCoresAdmin: else: ret = {} for core in self.getCores(): - ret[core] = JSONReader.readValue(self.data, ['status', core, 'index', 'sizeInBytes']) + ret[core] = JSONReader.readValue(self.data, + ['status', core, 'index', 'sizeInBytes']) return ret + class SolrCoreMBean: def __init__(self, host, solrurl, core): self.host = host @@ -143,7 +162,7 @@ class SolrCoreMBean: res = conn.getresponse() data = res.read() if res.status != 200: - raise CheckException("MBean fetch failed: %s\n%s" %( str(res.status), res.read())) + raise CheckException("MBean fetch failed: %s\n%s" % (str(res.status), res.read())) raw_data = json.loads(data) data = {} self.data = { @@ -164,24 +183,24 @@ class SolrCoreMBean: res = conn.getresponse() data = res.read() if res.status != 200: - raise CheckException("System fetch failed: %s\n%s" %( str(res.status), res.read())) + raise CheckException("System fetch failed: %s\n%s" % (str(res.status), res.read())) self.data['system'] = json.loads(data) - def _readInt(self, path): return self._read(path, int) def _readFloat(self, path): return self._read(path, float) - def _read(self, path, convert = None): + def _read(self, path, convert=None): if self.data is None: self._fetch() return JSONReader.readValue(self.data, path, convert) def _readCache(self, cache): result = {} - for key, ftype in [('lookups', int), ('hits', int), ('inserts', int), ('evictions', int), ('hitratio', float)]: + for key, ftype in [('lookups', int), ('hits', int), ('inserts', int), ('evictions', int), + ('hitratio', float)]: path = ['solr-mbeans', 'CACHE', cache, 'stats', 'cumulative_%s' % key] result[key] = self._read(path, ftype) 
result['size'] = self._readInt(['solr-mbeans', 'CACHE', cache, 'stats', 'size']) @@ -228,6 +247,7 @@ class SolrCoreMBean: data[k] = int(data[k]) return data + ############################################################################# # Graph Templates @@ -301,7 +321,7 @@ graph_vlabel docs docs.label Docs graph_category search""" -INDEXSIZE_GRAPH_TPL = """graph_args --base 1024 -l 0 +INDEXSIZE_GRAPH_TPL = """graph_args --base 1024 -l 0 graph_vlabel Bytes graph_title Index Size graph_category search @@ -313,7 +333,7 @@ xmx.colour ff0000 """ INDEXSIZECORE_GRAPH_TPL = """{core}.label {core} -{core}.draw STACK""" +{core}.draw STACK""" MEMORYUSAGE_GRAPH_TPL = """graph_args --base 1024 -l 0 --upper-limit {availableram} graph_vlabel Bytes @@ -325,6 +345,7 @@ max.label Max max.colour ff0000 """ + ############################################################################# # Graph management @@ -339,7 +360,8 @@ class SolrMuninGraph: return SolrCoreMBean(self.hostport, self.solrurl, core) def _cacheConfig(self, cacheType, cacheName): - return CACHE_GRAPH_TPL.format(core=self.params['core'], cacheType=cacheType, cacheName=cacheName) + return CACHE_GRAPH_TPL.format(core=self.params['core'], cacheType=cacheType, + cacheName=cacheName) def _format4Value(self, value): if isinstance(value, basestring): @@ -350,18 +372,20 @@ class SolrMuninGraph: return "%.6f" return "%s" - def _cacheFetch(self, cacheType, fields = None): + def _cacheFetch(self, cacheType, fields=None): fields = fields or ['size', 'lookups', 'hits', 'inserts', 'evictions'] hits_fields = ['lookups', 'hits', 'inserts'] size_fields = ['size', 'evictions'] results = [] solrmbean = self._getMBean(self.params['core']) data = getattr(solrmbean, cacheType)() - results.append('multigraph solr_{core}_{cacheType}_hit_rates'.format(core=self.params['core'], cacheType=cacheType)) + results.append('multigraph solr_{core}_{cacheType}_hit_rates' + .format(core=self.params['core'], cacheType=cacheType)) for label in hits_fields: vformat = self._format4Value(data[label]) results.append(("%s.value " + vformat) % (label, data[label])) - results.append('multigraph solr_{core}_{cacheType}_size'.format(core=self.params['core'], cacheType=cacheType)) + results.append('multigraph solr_{core}_{cacheType}_size' + .format(core=self.params['core'], cacheType=cacheType)) for label in size_fields: results.append("%s.value %d" % (label, data[label])) return "\n".join(results) @@ -385,12 +409,14 @@ class SolrMuninGraph: def qpsConfig(self): cores = self._getCores() - graph = [QPSCORE_GRAPH_TPL.format(core=c, gtype='LINESTACK1') for pos,c in enumerate(cores) ] + graph = [QPSCORE_GRAPH_TPL.format(core=c, gtype='LINESTACK1') + for pos, c in enumerate(cores)] return QPSMAIN_GRAPH_TPL.format( - cores_qps_graphs='\n'.join(graph), - handler=self.params['params']['handler'], - core=self.params['core'], - cores_qps_cdefs='%s,%s' % (','.join(map(lambda x: 'qps_%s' % x, cores)),','.join(['+']*(len(cores)-1))), + cores_qps_graphs='\n'.join(graph), + handler=self.params['params']['handler'], + core=self.params['core'], + cores_qps_cdefs='%s,%s' % (','.join(map(lambda x: 'qps_%s' % x, cores)), + ','.join(['+'] * (len(cores) - 1))), gorder=','.join(cores) ) @@ -399,12 +425,14 @@ class SolrMuninGraph: cores = self._getCores() for c in cores: mbean = self._getMBean(c) - results.append('qps_%s.value %d' % (c, mbean.requestcount(self.params['params']['handler']))) + results.append('qps_%s.value %d' + % (c, mbean.requestcount(self.params['params']['handler']))) return '\n'.join(results) 
def requesttimesConfig(self): cores = self._getCores() - graphs = [REQUESTTIMES_GRAPH_TPL.format(core=c, handler=self.params['params']['handler']) for c in cores ] + graphs = [REQUESTTIMES_GRAPH_TPL.format(core=c, handler=self.params['params']['handler']) + for c in cores] return '\n'.join(graphs) def requesttimes(self): @@ -426,7 +454,7 @@ class SolrMuninGraph: def indexsizeConfig(self): cores = self._getCores() - graph = [ INDEXSIZECORE_GRAPH_TPL.format(core=c) for c in cores] + graph = [INDEXSIZECORE_GRAPH_TPL.format(core=c) for c in cores] return INDEXSIZE_GRAPH_TPL.format(cores=" ".join(cores), cores_config="\n".join(graph)) def indexsize(self): @@ -446,7 +474,6 @@ class SolrMuninGraph: return MEMORYUSAGE_GRAPH_TPL.format(availableram=memory['max'] * 1.05) def memory(self): - results = [] cores = self._getCores() mbean = self._getMBean(cores[0]) memory = mbean.memory() @@ -476,13 +503,13 @@ class SolrMuninGraph: def queryresultcache(self): return self._cacheFetch('queryresultcache') + if __name__ == '__main__': params = parse_params() SOLR_HOST_PORT = os.environ.get('host_port', 'localhost:8080').replace('http://', '') - SOLR_URL = os.environ.get('url', '/solr') + SOLR_URL = os.environ.get('url', '/solr') if SOLR_URL[0] != '/': - SOLR_URL = '/' + SOLR_URL + SOLR_URL = '/' + SOLR_URL mb = SolrMuninGraph(SOLR_HOST_PORT, SOLR_URL, params) if hasattr(mb, params['op']): - print getattr(mb, params['op'])(params['type']) - + print(getattr(mb, params['op'])(params['type'])) diff --git a/plugins/solr/solrmulticore b/plugins/solr/solrmulticore deleted file mode 100755 index b210246f..00000000 --- a/plugins/solr/solrmulticore +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/python -# -# Copyright (C) Rodolphe Franceschi -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# -# This plugin monitors a SOLR server configured for multicore by automatically -# getting core names from SOLR default page. -# -# Tested on SOLR 1.4.0 -# -# Parameters: -# config (required) -# autoconf (optional - used by munin-config) -# -# For the full list of options, refer to PLUGINOPTIONSLIST variable -# -# Example of symlink creation on Debian Lenny -# ln -s /usr/share/munin/plugins/solrmulticore_ /etc/munin/plugins/solrmulticore_avgRequestsPerSecond -# -# Magic markers (Used by munin-config and some installation scripts. 
-# Optional): -#%# family=auto -#%# capabilities=autoconf - -import sys, os -import urllib2 -import HTMLParser, urllib - -try: - from xml.etree import cElementTree as ET -except ImportError: - try: - import cElementTree as ET - except ImportError: - sys.exit(1) - - -## GLOBALS -SOLR_PORT = 8983 -SOLR_HOST = "localhost" -PLUGINOPTIONSLIST = { - "avgRequestsPerSecond" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler', 'label': 'Average requests per second' }, - "avgTimePerRequest" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler', 'label': 'Average time per request' }, - "errors" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler', 'label': 'Number of errors' }, - "timeouts" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler', 'label': 'Number of timeouts' }, - "requests" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler', 'label': 'Total number of requests' }, - "numDocs" : { 'xmlpath': '/solr-info/CORE/entry', 'xmlparententryname': 'searcher', 'label': 'Number of documents' }, -# "" : { 'xmlpath': '/solr-info/QUERYHANDLER/entry', 'xmlparententryname': 'org.apache.solr.handler.StandardRequestHandler' }, -} - -# Automatic extraction of core names from SOLR default page if empty array -SOLR_CORES = [] -# If you do not want automatic gathering, feel free to put your array manually -#SOLR_CORES = [ "core0", "core1" ] - - -## FUNCTIONS -def getSolrCoreNameList(): - url = "http://%s:%s/solr/" % (SOLR_HOST, SOLR_PORT) - class linkParser(HTMLParser.HTMLParser): - def __init__(self): - HTMLParser.HTMLParser.__init__(self) - self.links = [] - def handle_starttag(self, tag, attrs): - if tag=='a': - self.links.append(dict(attrs)['href']) - - htmlSource = urllib.urlopen(url).read(200000) - p = linkParser() - p.feed(htmlSource) - - # Remove link to . (First one !) 
- p.links.pop(0) - - dealcores = [ ] - - for link in p.links: - dealcores.append ( link.split("/")[0] ) - - return dealcores - - - -def parseArgs(): - "Parses the name of the file " - parts = sys.argv[0].split("_") - - params = { } - params['valueName'] = parts[1] - - # Automatic / Manual Mode for core names - if (len(SOLR_CORES) == 0): - params['cores'] = getSolrCoreNameList() - else: - params['cores'] = SOLR_CORES - - params['cores'].sort() - - return params - -# For multicore / Solr 1.3, URL is like that -def fetchUrl(core, xmlPath): - URL="http://%s:%d/solr/%s/admin/stats.jsp" - - URLFULL = URL % (SOLR_HOST, SOLR_PORT, core) - response = urllib2.urlopen(URLFULL) - return parseXmlResponse(response, xmlPath) - -def parseXmlResponse(response, xmlPath): - root = ET.parse(response) - queues = root.findall(xmlPath) - return queues - -#def fetchFile(): -# f = open("/tmp/stats.jsp.html") -# return parseXmlResponse(f) - -def getEntry(entries, entryName): - for entry in entries: - name = entry.findtext("name").strip() - if (name != entryName): - continue - return entry.find("stats") - -def getValue(entry, valueName): - for stat in entry: - if stat.get('name') == valueName: - return stat.text - #print "Could not find %s for entry" % valueName - return 0 - - - - -## MAIN -if len(sys.argv) > 1: - if sys.argv[1]== "autoconf": - # check connection - sys.exit(0) - elif sys.argv[1] == "config": - params = parseArgs() - - # Extracting Generic graph datas - print 'graph_title %s' % ( PLUGINOPTIONSLIST[params['valueName']]['label'] ) - print "graph_args --base 1000"; - print 'graph_vlabel Size %s' % params['valueName'] - print 'graph_category search' - print 'graph_info Info for cores: %s' % ( ",".join(params['cores']) ) - - # Iterations for core datas - for core in params['cores']: - #print core, params['valueName'] - print "%s.label %s" % (core, core) - print "%s.type GAUGE" % (core) - print "%s.min 0" % (core) - sys.exit(0) - - -params = parseArgs() -for core in params['cores']: - #print core, params['valueName'] - queues = fetchUrl(core, PLUGINOPTIONSLIST[params['valueName']]['xmlpath']) - searcher = getEntry(queues, PLUGINOPTIONSLIST[params['valueName']]['xmlparententryname']) - value = getValue(searcher, params['valueName']).strip() - print "%s.value %s" % (core, value) - -sys.exit(0) diff --git a/plugins/solr/wfsolr_ b/plugins/solr/wfsolr_ index fa5d5025..708aaa1d 100755 --- a/plugins/solr/wfsolr_ +++ b/plugins/solr/wfsolr_ @@ -14,7 +14,7 @@ * * You need to have a PHP 5.2.6+ CLI installed too with curl extension or * allow_url_fopen directive on - * + * * Once the plugin is available you can symlink it with the following naming convention : * wfsolr-[name of the core]-[name of the stats section - ex.: CORE]-[name of the entry in the xml - ex.: searcher]-[name of the stat to graph - ex.: numDocs] * diff --git a/plugins/sourceds/srcds_cpu b/plugins/sourceds/srcds_cpu index 9a1b5099..ae56702b 100755 --- a/plugins/sourceds/srcds_cpu +++ b/plugins/sourceds/srcds_cpu @@ -130,15 +130,15 @@ sub test_service { my $sock = Rcon::sock_connect($HOST, $PORT); if (!$sock) { print "no (could not open socket to $HOST:$PORT)\n"; - exit 1; + exit 0; } if (!Rcon::rcon_auth($sock, $PASS)) { print "no (could not authenticate)\n"; - exit 1; + exit 0; } if (!defined(Rcon::rcon_command($sock, "stats"))) { print "no (did not receive reply from server)\n"; - exit 1; + exit 0; } print "yes\n"; diff --git a/plugins/sourceds/srcds_fps b/plugins/sourceds/srcds_fps index c2625b7e..813ad5c2 100755 --- a/plugins/sourceds/srcds_fps 
+++ b/plugins/sourceds/srcds_fps @@ -132,15 +132,15 @@ sub test_service { my $sock = Rcon::sock_connect($HOST, $PORT); if (!$sock) { print "no (could not open socket to $HOST:$PORT)\n"; - exit 1; + exit 0; } if (!Rcon::rcon_auth($sock, $PASS)) { print "no (could not authenticate)\n"; - exit 1; + exit 0; } if (!defined(Rcon::rcon_command($sock, "stats"))) { print "no (did not receive reply from server)\n"; - exit 1; + exit 0; } print "yes\n"; diff --git a/plugins/sourceds/srcds_inout b/plugins/sourceds/srcds_inout index 2ae34792..aab351f1 100755 --- a/plugins/sourceds/srcds_inout +++ b/plugins/sourceds/srcds_inout @@ -143,15 +143,15 @@ sub test_service { my $sock = Rcon::sock_connect($HOST, $PORT); if (!$sock) { print "no (could not open socket to $HOST:$PORT)\n"; - exit 1; + exit 0; } if (!Rcon::rcon_auth($sock, $PASS)) { print "no (could not authenticate)\n"; - exit 1; + exit 0; } if (!defined(Rcon::rcon_command($sock, "stats"))) { print "no (did not receive reply from server)\n"; - exit 1; + exit 0; } print "yes\n"; diff --git a/plugins/sourceds/srcds_players b/plugins/sourceds/srcds_players index ce2c1621..22bb4f5a 100755 --- a/plugins/sourceds/srcds_players +++ b/plugins/sourceds/srcds_players @@ -106,15 +106,15 @@ sub test_service { my $sock = Rcon::sock_connect($HOST, $PORT); if (!$sock) { print "no (could not open socket to $HOST:$PORT)\n"; - exit 1; + exit 0; } if (!Rcon::rcon_auth($sock, $PASS)) { print "no (could not authenticate)\n"; - exit 1; + exit 0; } if (!defined(Rcon::rcon_command($sock, "stats"))) { print "no (did not receive reply from server)\n"; - exit 1; + exit 0; } print "yes\n"; diff --git a/plugins/sourceds/srcds_uptime b/plugins/sourceds/srcds_uptime index 0e4011e2..0aee1ea2 100755 --- a/plugins/sourceds/srcds_uptime +++ b/plugins/sourceds/srcds_uptime @@ -104,15 +104,15 @@ sub test_service { my $sock = Rcon::sock_connect($HOST, $PORT); if (!$sock) { print "no (could not open socket to $HOST:$PORT)\n"; - exit 1; + exit 0; } if (!Rcon::rcon_auth($sock, $PASS)) { print "no (could not authenticate)\n"; - exit 1; + exit 0; } if (!defined(Rcon::rcon_command($sock, "stats"))) { print "no (did not receive reply from server)\n"; - exit 1; + exit 0; } print "yes\n"; diff --git a/plugins/sphinx/sphindex_ b/plugins/sphinx/sphindex_ index 5cb775dd..73c3ef17 100755 --- a/plugins/sphinx/sphindex_ +++ b/plugins/sphinx/sphindex_ @@ -1,5 +1,4 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- +#!/usr/bin/env python3 # vim: set fileencoding=utf-8 # # Munin plugin to show number of documents in Sphinx index @@ -18,49 +17,56 @@ # This plugin requires pythons sphinxsearch module which can be installed via easy_install. # # ## Installation -# Copy file to directory /usr/share/munin/pligins/ and create symbolic links for each index you wish to monitor. +# Copy file to directory /usr/share/munin/pligins/ and create symbolic links for each index you +# wish to monitor. 
# For example, if you've got indexes called index1 and index2 create these symlinks: # # ln -s /usr/share/munin/plugins/sphindex_ /etc/munin/plugins/sphindex_index1 # ln -s /usr/share/munin/plugins/sphindex_ /etc/munin/plugins/sphindex_index2 # -# If you run munin-node at different box than Sphinxsearch you can specify hostname and port options in munin-node.conf: +# If you run munin-node at different box than Sphinxsearch you can specify hostname and port +# options in munin-node.conf: # # [sphindex_*] # env.server 10.216.0.141 # env.port 9312 # -#%# capabilities=autoconf -#%# family=contrib +# #%# capabilities=autoconf +# #%# family=contrib -import os, sys, sphinxsearch -progName = sys.argv[0] -indexName = progName[progName.find("_")+1:] +import os +import sys + +import sphinxsearch + + +prog_name = sys.argv[0] +index_name = prog_name[prog_name.find("_") + 1:] if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - print "yes" + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": warning = "0:" critical = "0:" - if "warning" in os.environ and os.environ["warning"] != None: + if "warning" in os.environ and os.environ["warning"]: warning = os.environ["warning"] - if "critical" in os.environ and os.environ["critical"] != None: + if "critical" in os.environ and os.environ["critical"]: critical = os.environ["critical"] - print "graph_title Sphinx index %s stats" % indexName - print "graph_vlabel docs count" - print "graph_category search" - print "documents_count.warning %s" % warning - print "documents_count.critical %s" % critical - print "documents_count.label Documents count in index" - print "graph_args --base 1000 -l 0" + print("graph_title Sphinx index %s stats" % index_name) + print("graph_vlabel docs count") + print("graph_category search") + print("documents_count.warning %s" % warning) + print("documents_count.critical %s" % critical) + print("documents_count.label Documents count in index") + print("graph_args --base 1000 -l 0") else: - if "server" in os.environ and os.environ["server"] != None: + if "server" in os.environ and os.environ["server"]: server = os.environ["server"] else: - server = "localhost" + server = "localhost" - if "port" in os.environ and os.environ["port"] != None: + if "port" in os.environ and os.environ["port"]: try: port = int(os.environ["port"]) except ValueError: @@ -71,7 +77,7 @@ else: client = sphinxsearch.SphinxClient() client.SetServer(server, port) client.SetLimits(0, 1, 0, 0) - result = client.Query("", indexName) - docCount = result["total_found"] + result = client.Query("", index_name) + doc_count = result["total_found"] - print "documents_count.value %d" % docCount + print("documents_count.value %d" % doc_count) diff --git a/plugins/sphinx/sphinx_documents b/plugins/sphinx/sphinx_documents index 9b73e15d..3a4cb67a 100755 --- a/plugins/sphinx/sphinx_documents +++ b/plugins/sphinx/sphinx_documents @@ -27,7 +27,7 @@ done exit 0;; esac -for i in `ls $idxpath/*.sph`; do +for i in `ls $idxpath/*.sph`; do echo -n "`basename $i .sph`.value " echo `indextool --dumpheader $i | grep ^total-doc | cut -f2 -d\:` done diff --git a/plugins/sphinx/sphinx_queries b/plugins/sphinx/sphinx_queries index 57db16e2..6cef29f8 100755 --- a/plugins/sphinx/sphinx_queries +++ b/plugins/sphinx/sphinx_queries @@ -25,7 +25,7 @@ if (isset($argc) && $argc > 1) { "graph_info This graph shows the number of queries for last 5 minutes\n" . "graph_category search\n" . "graph_args --base 1000 --lower-limit 0\n" . - "graph_vlabel Connections\n" . 
+ "graph_vlabel Queries\n" . "graph_info The number of current queries\n" . "graph_order current\n" . "graph_total Total\n" . diff --git a/plugins/spotweb/spotweb_average b/plugins/spotweb/spotweb_average index 2f31ee23..a05b7784 100755 --- a/plugins/spotweb/spotweb_average +++ b/plugins/spotweb/spotweb_average @@ -8,7 +8,7 @@ # Original idea : smeerbartje (http://gathering.tweakers.net/forum/myreact/190949) # slommer (http://gathering.tweakers.net/forum/myreact/353335) # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. @@ -51,7 +51,7 @@ if(defined $ARGV[0] && $ARGV[0] eq 'config') print < $FROMSTAMP;'`; -$spots =~ /(\d+)/; +$spots =~ /(\d+)/; print "spots.value ".$1."\n"; # Downloaded diff --git a/plugins/spotweb/spotweb_cat_average b/plugins/spotweb/spotweb_cat_average index 6728a987..1022d21a 100755 --- a/plugins/spotweb/spotweb_cat_average +++ b/plugins/spotweb/spotweb_cat_average @@ -2,11 +2,11 @@ # # Munin plugin for number of spots by categories in a MySQL database # -# Copyright (C) 2011 - Rowdy Schwachfer (http://rowdy.nl) +# Copyright (C) 2011 - Rowdy Schwachöfer (http://rowdy.nl) # # Spotweb : http://github.com/spotweb/spotweb # -# This program is free software: you can redistribute it and/or modify +# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. @@ -53,7 +53,7 @@ if(defined $ARGV[0] && $ARGV[0] eq 'config') { print < $FROMSTAMP AND category = $_[0]$_[1];"`; - $spots =~ /(\d+)/; - print "cat".$_[0].".value ".$1."\n"; + $spots =~ /(\d+)/; + print "cat".$_[0].".value ".$1."\n"; } diff --git a/plugins/spotweb/spotweb_cat_total b/plugins/spotweb/spotweb_cat_total index a2013db7..0900de81 100755 --- a/plugins/spotweb/spotweb_cat_total +++ b/plugins/spotweb/spotweb_cat_total @@ -2,7 +2,7 @@ # # Munin plugin for total spot by categories in a spotweb MySQL database # -# Copyright (C) 2011 - Rowdy Schwachfer (http://rowdy.nl) +# Copyright (C) 2011 - Rowdy Schwachöfer (http://rowdy.nl) # # Spotweb : http://github.com/spotweb/spotweb # @@ -46,7 +46,7 @@ if(defined $ARGV[0] && $ARGV[0] eq 'config') { print <new("UTF-8", "LATIN1"); # $converted = $converter->convert("Text to convert"); # First all the simple readings -foreach my $attr qw (albums artists genres songs) { +foreach my $attr (qw(albums artists genres songs)) { $conn->print ("info total ${attr} ?"); my $line = uri_unescape($conn->getline); if ($line =~ /^info total ${attr} (\d+)$/) { diff --git a/plugins/squid/squid b/plugins/squid/squid index 885b9438..e1c3a27a 100755 --- a/plugins/squid/squid +++ b/plugins/squid/squid @@ -7,11 +7,11 @@ squid - Plugin to monitor squid statistics =head1 APPLICABLE SYSTEMS -Any system running squid and avialable cache_object:// +Any system running squid and available cache_object:// =head1 CONFIGURATION -The following environment settings are the default configuration. +The following environment settings are the default configuration. 
[squid] env.squidhost localhost @@ -82,9 +82,9 @@ _ return sprintf($r_text, $squid_host, $what, &make_auth_header()); } -sub make_auth_header +sub make_auth_header { - if(defined $squid_passwd && $squid_passwd ne "") + if(defined $squid_passwd && $squid_passwd ne "") { my $h_text = <<_; Authorization: Basic %s @@ -99,22 +99,22 @@ _ sub squid_requst { my $what = $_[0]; - die "Could not connect: $!\n" unless my $cachemgr = IO::Socket::INET->new(PeerAddr => $squid_host, - PeerPort => $squid_port, - Proto => 'tcp', + die "Could not connect: $!\n" unless my $cachemgr = IO::Socket::INET->new(PeerAddr => $squid_host, + PeerPort => $squid_port, + Proto => 'tcp', Timeout => $squid_request_timeout); my $request = &make_request($what); $cachemgr->syswrite($request, length($request)); return $cachemgr; } -if($ARGV[0] and $ARGV[0] eq "autoconf") +if($ARGV[0] and $ARGV[0] eq "autoconf") { if (squid_requst('menu')->getline ne "") { print "yes\n"; } exit 0; } -if($ARGV[0] and $ARGV[0] eq "config") +if($ARGV[0] and $ARGV[0] eq "config") { my %config; # ------------------------------------------------- squid_efficiency_ ----------------------------------------- @@ -125,7 +125,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{$name}{'graph'}{'vlabel'} = '%'; $config{$name}{'graph'}{'args'} = '--lower-limit 0 --upper-limit 100'; $config{$name}{'graph'}{'scale'} = 'no'; - $config{$name}{'graph'}{'category'} = 'webserver'; + $config{$name}{'graph'}{'category'} = 'loadbalancer'; $config{$name}{'graph'}{'order'} = 'all bytes memory disk'; $config{$name}{'field'}{'all'}{'draw'} = 'AREA'; $config{$name}{'field'}{'all'}{'label'} = 'Hits of all requests'; @@ -145,7 +145,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_traffic'}{'graph'}{'title'} = 'Traffic statistics'; $config{'squid_traffic'}{'graph'}{'vlabel'} = 'bytes in (-) / out (+) per second'; $config{'squid_traffic'}{'graph'}{'args'} = '--base 1000'; - $config{'squid_traffic'}{'graph'}{'category'} = 'webserver'; + $config{'squid_traffic'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_traffic'}{'graph'}{'order'} = 'client_http_in server_all_in server_http_in server_ftp_in ' . 'server_other_in icp_in icp_q_in icp_r_in cd_in client_http_hit_out ' . 'client_http_out server_all_out server_http_out server_ftp_out ' . @@ -179,7 +179,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_ipcache'}{'graph'}{'title'} = 'IP cache statistics'; $config{'squid_ipcache'}{'graph'}{'vlabel'} = 'Count'; $config{'squid_ipcache'}{'graph'}{'args'} = '--base 1000';# --logarithmic'; - $config{'squid_ipcache'}{'graph'}{'category'} = 'webserver'; + $config{'squid_ipcache'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_ipcache'}{'graph'}{'order'} = 'request hits misses numhits neghits invdreqests entries'; $config{'squid_ipcache'}{'field'}{'entries'}{'label'} = 'Entries'; $config{'squid_ipcache'}{'field'}{'request'}{'label'} = 'Requests'; @@ -195,7 +195,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_requests'}{'graph'}{'title'} = 'Requests statistics'; $config{'squid_requests'}{'graph'}{'vlabel'} = 'errors (-) / requests (+) per second'; $config{'squid_requests'}{'graph'}{'args'} = '--base 1000'; - $config{'squid_requests'}{'graph'}{'category'} = 'webserver'; + $config{'squid_requests'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_requests'}{'graph'}{'order'} = 'client_http_errors server_all_errors server_http_errors ' . 'server_ftp_errors server_other_errors ' . 'client_http_hits aborted_requests client_http_requests ' . 
@@ -226,7 +226,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_storedir'}{'graph'}{'title'} = 'Store directory statistics'; $config{'squid_storedir'}{'graph'}{'vlabel'} = 'bytes'; $config{'squid_storedir'}{'graph'}{'args'} = '--base 1000'; - $config{'squid_storedir'}{'graph'}{'category'} = 'webserver'; + $config{'squid_storedir'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_storedir'}{'graph'}{'order'} = 'maximum current'; $config{'squid_storedir'}{'field'}{'maximum'}{'label'} = 'Maximum'; $config{'squid_storedir'}{'field'}{'maximum'}{'draw'} = 'AREA'; @@ -238,7 +238,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_memory'}{'graph'}{'title'} = 'Store memory statistics'; $config{'squid_memory'}{'graph'}{'vlabel'} = 'bytes'; $config{'squid_memory'}{'graph'}{'args'} = '--base 1000'; - $config{'squid_memory'}{'graph'}{'category'} = 'webserver'; + $config{'squid_memory'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_memory'}{'graph'}{'order'} = 'mi_tsia mi_ob mi_sb mi_hb mi_fsb mi_fob mi_tiu mi_tf mi_ts ta ma mu'; $config{'squid_memory'}{'field'}{'mi_tsia'}{'label'} = '[mallinfo()] Total space in arena'; $config{'squid_memory'}{'field'}{'mi_ob'}{'label'} = '[mallinfo()] Ordinary blocks'; @@ -261,7 +261,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_meanobject'}{'graph'}{'title'} = 'Mean object size'; $config{'squid_meanobject'}{'graph'}{'vlabel'} = 'bytes'; $config{'squid_meanobject'}{'graph'}{'args'} = '--base 1000'; - $config{'squid_meanobject'}{'graph'}{'category'} = 'webserver'; + $config{'squid_meanobject'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_meanobject'}{'graph'}{'order'} = 'mos'; $config{'squid_meanobject'}{'field'}{'mos'}{'label'} = 'Mean object size'; $config{'squid_meanobject'}{'field'}{'mos'}{'draw'} = 'LINE1'; @@ -270,7 +270,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_cpu'}{'graph'}{'title'} = 'CPU usage'; $config{'squid_cpu'}{'graph'}{'vlabel'} = '%'; $config{'squid_cpu'}{'graph'}{'args'} = '--base 1000 --lower-limit 0'; - $config{'squid_cpu'}{'graph'}{'category'} = 'webserver'; + $config{'squid_cpu'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_cpu'}{'graph'}{'scale'} = 'no'; $config{'squid_cpu'}{'graph'}{'order'} = 'cur av5 av60'; $config{'squid_cpu'}{'field'}{'cur'}{'label'} = 'Current'; @@ -288,7 +288,7 @@ if($ARGV[0] and $ARGV[0] eq "config") $config{'squid_ids'}{'graph'}{'title'} = 'Internal data structures'; $config{'squid_ids'}{'graph'}{'vlabel'} = 'Count'; $config{'squid_ids'}{'graph'}{'args'} = '--lower-limit 0'; - $config{'squid_ids'}{'graph'}{'category'} = 'webserver'; + $config{'squid_ids'}{'graph'}{'category'} = 'loadbalancer'; $config{'squid_ids'}{'graph'}{'order'} = 'se sewmo hoci odo'; $config{'squid_ids'}{'field'}{'se'}{'label'} = 'StoreEntries'; $config{'squid_ids'}{'field'}{'se'}{'draw'} = 'LINE1'; @@ -302,10 +302,10 @@ if($ARGV[0] and $ARGV[0] eq "config") foreach my $time (qw(5 60)) { my $name = sprintf("squid_mst_%s", $time); - $config{$name}{'graph'}{'title'} = "Median service times in ${time} minuts"; + $config{$name}{'graph'}{'title'} = "Median service times in ${time} minutes"; $config{$name}{'graph'}{'vlabel'} = 'seconds'; $config{$name}{'graph'}{'args'} = '--base 1000'; - $config{$name}{'graph'}{'category'} = 'webserver'; + $config{$name}{'graph'}{'category'} = 'loadbalancer'; $config{$name}{'graph'}{'order'} = 'hra cm ch nh nmr dl iq'; $config{$name}{'field'}{'hra'}{'label'} = 'HTTP Requests (All)'; $config{$name}{'field'}{'hra'}{'draw'} = 'LINE1'; @@ -349,7 +349,7 
@@ my $cm; my %data; # ----------------- squid_efficiency_ and squid_memory -------------------- $cm = squid_requst('info'); -while (my $line = $cm->getline) +while (my $line = $cm->getline) { # ----------------- squid_efficiency_ -------------------- if($line =~ /Hits as % of all requests:.*/) { ($data{'squid_efficiency_5'}{'all'}, $data{'squid_efficiency_60'}{'all'}) = $line =~ /([\d.]+)%/g; next; } @@ -392,7 +392,7 @@ while (my $line = $cm->getline) # ----------------- squid_ipcache -------------------- $cm = squid_requst('ipcache'); -while (my $line = $cm->getline) +while (my $line = $cm->getline) { if($line =~ /IPcache Entries:.*/) { $data{'squid_ipcache'}{'entries'} = ($line =~ /(\d+)/g)[0]; next; } if($line =~ /IPcache Requests:.*/) { $data{'squid_ipcache'}{'request'} = ($line =~ /(\d+)/g)[0]; next; } @@ -461,5 +461,5 @@ foreach my $mgraph (sort keys(%data)) } # for Munin Plugin Gallery -# graph_category webserver +# graph_category loadbalancer diff --git a/plugins/squid/squid_efficiency b/plugins/squid/squid_efficiency index 0e4a9ac8..286c9f3a 100755 --- a/plugins/squid/squid_efficiency +++ b/plugins/squid/squid_efficiency @@ -51,7 +51,7 @@ # # [squid_efficiency] # env.squidhost yourhost.example.com -# env.squidport 8080 +# env.squidport 8080 # host=${squidhost:-localhost} @@ -60,7 +60,7 @@ port=${squidport:-3128} test "$1" = "config" && { echo 'graph_title Squid Efficiency' echo 'graph_info This graph shows the proxy efficiency over the last five mins.' - echo 'graph_category webserver' + echo 'graph_category loadbalancer' echo "graph_args --lower-limit 0 --upper-limit 100" echo 'graph_vlabel %' echo 'request.label request hits' diff --git a/plugins/squid/squid_times b/plugins/squid/squid_times index 23f935e0..4817e260 100755 --- a/plugins/squid/squid_times +++ b/plugins/squid/squid_times @@ -4,7 +4,7 @@ # License GPL V2 or higher # # Abstract -# munin plugin that logs the cache mean services times +# munin plugin that logs the cache mean services times # Requires netcat (here nc) # # Authors @@ -19,23 +19,22 @@ port=${squidport:-3128} if [ "$1" = "autoconf" ]; then SQUID_STATS=`printf "GET cache_object://$host/info HTTP/1.0\n\n" | netcat $host $port` if [ -n "${SQUID_STATS}" ]; then - echo yes - exit 0 + echo yes else echo "no (HTTP GET failed)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then echo 'graph_title Squid Median Services Times' echo 'graph_info This graph shows the proxy median services response times.' 
- echo 'graph_category webserver' + echo 'graph_category loadbalancer' echo 'graph_args --lower-limit 0' echo 'graph_vlabel median response times (s)' echo 'mean_http.label Http' - echo 'mean_cmis.label Cache misses' + echo 'mean_cmis.label Cache misses' echo 'mean_chits.label Cache hits' echo 'mean_nhits.label Near hits' echo 'mean_nmr.label Not-modified replies' @@ -43,7 +42,7 @@ if [ "$1" = "config" ]; then echo 'mean_icpq.label Icp queries' exit 0 -fi +fi SQUID_TIME=$(printf "GET cache_object://$host/info HTTP/1.0\n\n" | nc $host $port) @@ -57,7 +56,7 @@ SQUID_TIME_ICP_QUERIES=$(echo "$SQUID_TIME" | grep "ICP Queries" | cut -d':' -f2 echo "mean_http.value $SQUID_TIME_HTTP" echo "mean_cmis.value $SQUID_TIME_CACHE_MISSES" -echo "mean_chits.value $SQUID_TIME_CACHE_HITS" +echo "mean_chits.value $SQUID_TIME_CACHE_HITS" echo "mean_nhits.value $SQUID_TIME_NEAR_HITS" echo "mean_nmr.value $SQUID_TIME_NM_REPLIES" echo "mean_dnsl.value $SQUID_TIME_DNS_LOOKUPS" diff --git a/plugins/ssh/example-graphs/hostdenied-week.png b/plugins/ssh/example-graphs/hostdenied-week.png new file mode 100644 index 00000000..d7366d3b Binary files /dev/null and b/plugins/ssh/example-graphs/hostdenied-week.png differ diff --git a/plugins/ssh/example-graphs/sshd_log-day.png b/plugins/ssh/example-graphs/sshd_log-day.png new file mode 100644 index 00000000..525c9c03 Binary files /dev/null and b/plugins/ssh/example-graphs/sshd_log-day.png differ diff --git a/plugins/ssh/example-graphs/sshd_log-month.png b/plugins/ssh/example-graphs/sshd_log-month.png new file mode 100644 index 00000000..d15366d4 Binary files /dev/null and b/plugins/ssh/example-graphs/sshd_log-month.png differ diff --git a/plugins/ssh/hostdenied b/plugins/ssh/hostdenied index 9996f2d6..e950e6cf 100755 --- a/plugins/ssh/hostdenied +++ b/plugins/ssh/hostdenied @@ -13,7 +13,7 @@ # (at your option) any later version. # # This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of +# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # @@ -22,19 +22,19 @@ # # ------------------------------------------------------------------------------------------------------ # 20100310 v1.01 ls -# as threatened, shows now "temperatures" of active hosts.deny lines. Recent additions are +# as threatened, shows now "temperatures" of active hosts.deny lines. Recent additions are # displayed in bright red, turning to blue as older the addition rules are. # This requires denyhosts to add line to hosts.deny in a specific format. Also, times are currently # hardcoded, and not a lot of flexibility adjusting them through parameters. # A line in hosts.deny should come with a comment, looking like: # # DenyHosts: Sat Mar 6 01:11:57 2010 | sshd: 87.101.51.198 # 8 graphs are drawn from that depicting number of rules in 24 h increments. Different colours are -# assigned to graphs which are <24h, 24-48h, 48-72h ... old. The last (coldest) graph shows rules +# assigned to graphs which are <24h, 24-48h, 48-72h ... old. The last (coldest) graph shows rules # which have been added > 168h ago. # I'm considerering to change age granularity to hours, rather than days, and plot many graphs (64 or 128, -# which are nice for colour calculations), showing more of a colour cloud than discernible areas. +# which are nice for colour calculations), showing more of a colour cloud than discernible areas. 
# The plugin must have permission to read /etc/hosts.deny, of course. -# 20100308, v1.0, ls +# 20100308, v1.0, ls # Will probably add multiple stacked graphs, indicative for addition/removal date of denies, # instead of a boring single area graph. # ------------------------------------------------------------------------------------------------------ @@ -44,32 +44,34 @@ # ------------------------------------------------------------------------------------------------------ DENY="/etc/hosts.deny" -NAME="$(basename $0)" # component of naming temporary files +NAME="$(basename "$0")" # component of naming temporary files STATEFILE="$MUNIN_PLUGSTATE/$NAME.state" COLOUR=(FF0000 DA0024 B60048 91006D 6D0091 4800B6 2400DA 0000FF) # hot to cold colours # ------------------------------------------------------------------------------------------------------ run_autoconf() { - RUN="no" - which grep denyhosts basename > /dev/null && RUN="yes" # only run when grep and denyhost are present + RUN="no (denyhosts not found)" + command -v denyhosts > /dev/null && RUN="yes" # only run when denyhosts is present echo "$RUN" } run_config() { -cat << EOF -graph_title denied sshd access in $DENY + cat << EOF +graph_title Hosts denied sshd access +graph_info Hosts denied sshd access in $DENY graph_args --base 1000 -l 0 graph_vlabel Hosts denied graph_category security -age0.label added last 24h -age0.draw AREA -age0.colour ${COLOUR[0]} EOF -for AGE in {1..7}; do -cat << EOF -age${AGE}.label older than $((AGE*24))h -age${AGE}.draw STACK +for AGE in {7..0}; do + if [ "${AGE}" = 0 ]; then + echo "age${AGE}.label added last 24h" + else + echo "age${AGE}.label older than $((AGE*24))h" + fi + cat << EOF +age${AGE}.draw AREASTACK age${AGE}.colour ${COLOUR[$AGE]} EOF done @@ -80,17 +82,17 @@ run_fetch() { TOTAL=0 NOW=$(date +%s) sed -n 's/^\# DenyHosts: //;s/ | .*//gp' $DENY | # strip all but date - while read DATE; do + while read -r DATE; do echo $(((NOW - $(date -d "$DATE" +%s))/86400)) # calculate rule age - done > $STATEFILE # rather than going through temp file, the age could be - for AGE in {0..6} ; do # used to increment an array element with that index. - COUNT="$(grep -c "^$AGE$" $STATEFILE)" # That'd save grepping for counting from temp file. + done > "$STATEFILE" # rather than going through temp file, the age could be + for AGE in {6..0} ; do # used to increment an array element with that index. + COUNT="$(grep -c "^$AGE$" "$STATEFILE")" # That'd save grepping for counting from temp file. echo "age${AGE}.value $COUNT" # produce values for all but oldest ((TOTAL+=COUNT)) done - echo "age7.value $(($(grep -c . $STATEFILE)-TOTAL))" # all non-printed are older - rm $STATEFILE + echo "age7.value $(($(grep -c . 
"$STATEFILE")-TOTAL))" # all non-printed are older + rm "$STATEFILE" } -run_${1:-"fetch"} +run_"${1:-fetch}" exit 0 diff --git a/plugins/ssh/openssh-denyhosts b/plugins/ssh/openssh-denyhosts index 46c13bcd..f94478bd 100755 --- a/plugins/ssh/openssh-denyhosts +++ b/plugins/ssh/openssh-denyhosts @@ -11,8 +11,8 @@ # mktempfile () { -mktemp -t -} +mktemp -t +} AUTH_LOG=${logfile:-/var/log/auth.log} STATEFILE=$MUNIN_PLUGSTATE/sshd.offset @@ -21,11 +21,10 @@ LOGTAIL=${logtail:-`which logtail`} if [ "$1" = "autoconf" ]; then if [ -f "${AUTH_LOG}" -a -n "${LOGTAIL}" -a -x "${LOGTAIL}" ] ; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then @@ -59,7 +58,7 @@ then $LOGTAIL ${AUTH_LOG} $STATEFILE | grep 'sshd' > ${TEMP_FILE} refused=`grep -ic 'refused' ${TEMP_FILE}` - accepted=`grep -ic 'accepted' ${TEMP_FILE}` + accepted=`grep -ic 'accepted' ${TEMP_FILE}` invalid=`grep -ic 'invalid user' ${TEMP_FILE}` failed=`grep -ic 'failed password' ${TEMP_FILE}` diff --git a/plugins/ssh/sshd_invalid_countries b/plugins/ssh/sshd_invalid_countries index e36b3201..9fa3e73d 100755 --- a/plugins/ssh/sshd_invalid_countries +++ b/plugins/ssh/sshd_invalid_countries @@ -2,7 +2,7 @@ b[0]} - c + c.to_a.sort { |a, b| a[0] <=> b[0] } end case ARGV[0] when 'autoconf' begin fh = open(SYSLOG, 'r') - rescue + rescue StandardError puts 'no' - exit 1 + exit 0 else puts 'yes' exit 0 @@ -73,8 +76,8 @@ when 'config' puts 'graph_vlabel number of invalid access per country' puts 'graph_category security' puts 'graph_info This graph shows the countries of invalid access to sshd.' - getInvalids.each {|k,v| puts k + '.label ' + k} + getInvalids.each { |k, _v| puts k + '.label ' + k } exit 0 else - getInvalids.each {|k,v| puts k + '.value ' + v.to_s} + getInvalids.each { |k, v| puts k + '.value ' + v.to_s } end diff --git a/plugins/ssh/sshd_log b/plugins/ssh/sshd_log index 608f49ec..deb9a063 100755 --- a/plugins/ssh/sshd_log +++ b/plugins/ssh/sshd_log @@ -1,66 +1,175 @@ #!/bin/sh -# -# Plugin to monitor auth.log for sshd server events. -# -# Require read permitions for $LOG -# (set in /etc/munin/plugin-conf.d/munin-node on debian) -# On busy servers you can change value type to COUNTER and set min to 0 to avoid minus peaks at logrotate -# -# $Log$ -# Revision 1.2 2010/03/19 15:03:00 pmoranga -# Revision 1.1 2009/04/26 23:28:00 ckujau -# Revision 1.0 2009/04/22 22:00:00 zlati -# Initial revision -# -# Parameters: + +: <<=cut + +=head1 NAME + +sshd_log - Munin plugin to monitor auth.log or journald for sshd + server events. + +=head1 CONFIGURATION + +This plugin requires read permission for the logfile or journald. + +On busy servers you can change value type to COUNTER and set min to 0 +to avoid minus peaks at logrotate. + +The following environment variables are used by this plugin: + + logfile - path to the auth log file, or "journald" to use journald. + default: /var/log/secure + + journalctlargs - space separated list of arguments to pass to + journalctl to get the sshd logs. + default: _COMM=sshd + + type - "GAUGE" or "DERIVE" + default: GAUGE + +If the "logfile" environment variable is set to "journald" the sshd +logs are read from journald, filtering on program "sshd". The filtering +may be changed using "journalctlargs". 
+ + +Config examples for /etc/munin/plugin-conf.d/munin-node: + + [sshd_log] + user root + group root + env.logfile /var/log/messages + +Config example with journald: + + [sshd_log] + group systemd-journal + env.logfile journald + +Config example with journald on the sshd.service unit only: + + [sshd_log] + group systemd-journal + env.logfile journald + env.journalctlargs --unit=sshd.service + +Config example with journald and type DERIVE: + + [sshd_log] + group systemd-journal + env.logfile journald + env.type DERIVE + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 AUTHOR + +Revision 2.0 2016/11/11 15:42:00 Thomas Riccardi +Revision 1.2 2010/03/19 15:03:00 pmoranga +Revision 1.1 2009/04/26 23:28:00 ckujau +Revision 1.0 2009/04/22 22:00:00 zlati + +=cut + + +# Script parameters: # # config (required) # autoconf (optional - used by munin-config) -# -# Magick markers (optional): -#%# family=auto -#%# capabilities=autoconf -# config example for /etc/munin/plugin-conf.d/munin-node -#[sshd_log] -#user root -#group root -#env.logfile /var/log/messages -#env.category users -# LOG=${logfile:-/var/log/secure} +JOURNALCTL_ARGS=${journalctlargs:-_COMM=sshd} +TYPE=${type:-GAUGE} +if [ "$LOG" = "journald" -a "$TYPE" = "DERIVE" ]; then + TYPE=ABSOLUTE +fi if [ "$1" = "autoconf" ]; then - if [ -r "$LOG" ]; then - echo yes - exit 0 + if [ "$LOG" = "journald" ]; then + # shellcheck disable=SC2086,SC2034 + if journalctl --no-pager --quiet --lines=1 $JOURNALCTL_ARGS | read -r DUMMY; then + echo "yes" + else + echo "no (journald empty log for '$JOURNALCTL_ARGS' not found)" + fi else - echo no - exit 1 + if [ -r "$LOG" ]; then + echo "yes" + else + echo "no (logfile '$LOG' not readable)" + fi fi + exit 0 fi if [ "$1" = "config" ]; then - - echo 'graph_title SSHD login stats from' $LOG + echo 'graph_title SSHD login stats' + echo 'graph_info SSHD login stats from' "$LOG" echo 'graph_args --base 1000 -l 0' echo 'graph_vlabel logins' echo 'graph_category' security echo 'LogPass.label Successful password logins' + echo 'LogPass.min 0' + echo 'LogPass.type' "$TYPE" + echo 'LogPassPAM.label Successful login via PAM' + echo 'LogPassPAM.min 0' + echo 'LogPassPAM.type' "$TYPE" + echo 'LogKey.label Successful PublicKey logins' + echo 'LogKey.min 0' + echo 'LogKey.type' "$TYPE" + echo 'NoID.label No identification from user' + echo 'NoID.min 0' + echo 'NoID.type' "$TYPE" + echo 'rootAttempt.label Root login attempts' - echo 'InvUsr.label Invalid user login attepmts' + echo 'rootAttempt.min 0' + echo 'rootAttempt.type' "$TYPE" + + echo 'InvUsr.label Invalid user login attempts' + echo 'InvUsr.min 0' + echo 'InvUsr.type' "$TYPE" + echo 'NoRDNS.label No reverse DNS for peer' + echo 'NoRDNS.min 0' + echo 'NoRDNS.type' "$TYPE" + echo 'Breakin.label Potential Breakin Attempts' + echo 'Breakin.min 0' + echo 'Breakin.type' "$TYPE" + exit 0 fi -awk 'BEGIN{c["LogPass"]=0;c["LogKey"]=0;c["NoID"]=0;c["rootAttempt"]=0;c["InvUsr"]=0;c["LogPassPAM"]=0;c["Breakin"]=0;c["NoRDNS"]=0; } +if [ "$LOG" = "journald" -a "$TYPE" = "ABSOLUTE" ]; then + CURSOR_FILE="$MUNIN_STATEFILE" + # read cursor + # format: "journald-cursor " + CURSOR= + if [ -f "$CURSOR_FILE" ]; then + CURSOR=$(awk '/^journald-cursor / {print $2}' "$CURSOR_FILE") + fi +else + CURSOR_FILE= +fi + +if [ "$LOG" = "journald" ]; then + # shellcheck disable=SC2086 + if [ "$TYPE" = "ABSOLUTE" ]; then + journalctl --no-pager --quiet --show-cursor ${CURSOR:+"--after-cursor=$CURSOR"} $JOURNALCTL_ARGS + else + journalctl --no-pager --quiet --since=$(date 
-dlast-sunday +%Y-%m-%d) $JOURNALCTL_ARGS + fi +else + cat "$LOG" +fi | \ + awk -v cursor_file="$CURSOR_FILE" 'BEGIN{c["LogPass"]=0;c["LogKey"]=0;c["NoID"]=0;c["rootAttempt"]=0;c["InvUsr"]=0;c["LogPassPAM"]=0;c["Breakin"]=0;c["NoRDNS"]=0; } /sshd\[.*Accepted password for/{c["LogPass"]++} /sshd\[.*Accepted publickey for/{c["LogKey"]++} /sshd\[.*Did not receive identification string/{c["NoID"]++} @@ -69,4 +178,4 @@ awk 'BEGIN{c["LogPass"]=0;c["LogKey"]=0;c["NoID"]=0;c["rootAttempt"]=0;c["InvUsr /sshd\[.*POSSIBLE BREAK-IN ATTEMPT!/{c["Breakin"]++} /sshd\[.*keyboard-interactive\/pam/{c["LogPassPAM"]++} /sshd\[.*reverse mapping checking getaddrinfo/{c["NoRDNS"]++}a - END{for(i in c){print i".value " c[i]} }' < $LOG + END{if (cursor_file != "") { print "journald-cursor " $3 > cursor_file };for(i in c){print i".value " c[i]} }' diff --git a/plugins/ssl/certificate_file_expiry b/plugins/ssl/certificate_file_expiry new file mode 100755 index 00000000..09f2b854 --- /dev/null +++ b/plugins/ssl/certificate_file_expiry @@ -0,0 +1,72 @@ +#!/bin/sh +: << =cut +=head1 NAME + +certificate_file_expiry - check the certificate validity of your certificates + += head1 CONFIGURATION + +Installing: Add list of your certificates prefixed by the type in munin plugin-conf.d + +For openvpn ca.crt and crl.pem +[certificate_file_expiry] +user root +env.CERTS crl:/etc/openvpn/easy-rsa/keys/crl.pem x509:/etc/openvpn/easy-rsa/keys/ca.crt + +For letsencrypt certificates +[certificate_file_expiry] +user root +env.CERTS x509:/etc/letsencrypt/live/domain1.example.com/cert.pem x509:/etc/letsencrypt/live/domain2.example.com/cert.pem + +Warning and Critical levels can also be configured with env variables like this +[certificate_file_expiry] +... +# warn when certificate will be invalid within 5 days +env.warning 5: +# critical when certificate will be invalid within 1 day +env.critical 1: + +=head1 Dependencies + +Dependencies: openssl + +=head1 AUTHOR + +andreas perhab - andreas.perhab@wt-io-it.at +https://www.wt-io-it.at/ + +=head1 LICENSE + +GPLv2 + +=cut + +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" + +if [ "$1" = "config" ] ; then + echo "graph_title Certificate validity" + echo "graph_args --logarithmic --base 1000" + echo "graph_vlabel certificate validity in days" + echo "graph_category security" +fi + +now=$(date +%s) +warning=${warning:-5:} +critical=${critical:-1:} +for cert in ${CERTS}; do + cert_type=${cert%:*} + cert_file=${cert#*:} + cert_name=$(clean_fieldname "$cert_file") + if [ "$1" = "config" ] ; then + echo "${cert_name}.label ${cert_file}" + print_warning "$cert_name" + print_critical "$cert_name" + elif [ "$1" = "" ] ; then + validity=$(/usr/bin/openssl "$cert_type" -text -noout -in "$cert_file" | grep -E '(Next Update|Not After)') + validity=${validity#*:} + validity=$(date --date="$validity" +%s) + validity=$((validity - now)) + validity=$(echo "$validity" | awk '{ print ($1 / 86400) }') + echo "${cert_name}.value $validity" + fi +done diff --git a/plugins/ssl/letsencrypt_weekly b/plugins/ssl/letsencrypt_weekly new file mode 100755 index 00000000..96b905fd --- /dev/null +++ b/plugins/ssl/letsencrypt_weekly @@ -0,0 +1,62 @@ +#!/bin/sh +: << =cut +=head1 NAME + +letsencrypt_weekly - monitor the number of CSRs by week for /etc/letsencrypt/csr/ + +see https://letsencrypt.org/docs/rate-limits/ + += head1 CONFIGURATION + +You can configure the warning and critical limits for this plugin: + +[letsencrypt_weekly] +# warn when more than 40 certificates have been requested in the last week +env.warning :40 +# critical when more than 50 certificates have been requested in the last week +env.critical :50 + +=head1 Dependencies + +Dependencies: openssl + +=head1 AUTHOR + +andreas perhab - andreas.perhab@wt-io-it.at +https://www.wt-io-it.at/ + +=head1 LICENSE + +GPLv2 + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=cut + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +warning=${warning:-:40} +critical=${critical:-:50} #letsencrypt doesn't allow more than 50 certificates per week +# see https://letsencrypt.org/docs/rate-limits/ + +if [ "$1" = "autoconf" ] ; then + test -d /etc/letsencrypt/csr/ && echo "yes" || echo "no (directory /etc/letsencrypt/csr does not exist)" +elif [ "$1" = "config" ] ; then + echo "graph_title Letsencrypt certificate requests during last week" + echo "graph_args --base 1000" + echo "graph_vlabel Number of certificates" + echo "graph_category security" + echo "letsencrypt_weekly.label Letsencrypt certificates last week" + print_warning "letsencrypt_weekly" + print_critical "letsencrypt_weekly" +elif [ "$1" = "" ] ; then + if existing_certs=$(find /etc/letsencrypt/csr/ -mtime -7 -type f 2>/dev/null); then + value=$(echo "$existing_certs" | wc -l) + else + value="U" + fi + echo "letsencrypt_weekly.value $value" +fi diff --git a/plugins/ssl/ssl-certificate-expiry b/plugins/ssl/ssl-certificate-expiry index 9295380a..41f6fce3 100755 --- a/plugins/ssl/ssl-certificate-expiry +++ b/plugins/ssl/ssl-certificate-expiry @@ -1,16 +1,22 @@ -#!/bin/sh +#!/bin/sh -u # -*- sh -*- +# shellcheck disable=SC2039 : << =cut =head1 NAME -ssl-certificate-expiry - Plugin to monitor CERTificate expiration on multiple services and ports +ssl-certificate-expiry - Plugin to monitor Certificate expiration on multiple services and ports =head1 CONFIGURATION [ssl-certificate-expiry] - env.services www.service.tld blah.example.net_PORT + env.services www.service.tld blah.example.net_PORT foo.example.net_PORT_STARTTLS + +PORT is the TCP port number +STARTTLS is passed to openssl as "-starttls" argument. 
Useful for services like SMTP or IMAP implementing StartTLS. + Current known values are ftp, imap, pop3 and smtp + PORT is mandatory if STARTTLS is used. To set warning and critical levels do like this: @@ -28,84 +34,173 @@ For example: ssl-certificate-expiry_www.example.org_443 ssl-certificate-expiry_192.0.2.42_636 ssl-certificate-expiry_2001:0DB8::badc:0fee_485 + ssl-certificate-expiry_mail.example.net_25_smtp -=head1 AUTHOR +=head2 Cron setup -Pactrick Domack (ssl_) -Olivier Mehani (ssl-certificate-expiry) +To avoid having to run the SSL checks during the munin-update, it is possible +to run it from cron, and save a cachefile to be read during the update, This is +particularly useful when checking a large number of certificates, or when some +of the hosts are slow. -Copyright (C) 2013 Patrick Domack -Copyright (C) 2017 Olivier Mehani +To do so, add a cron job running the plugin with cron as the argument: + + * * * /usr/sbin/munin-run/ssl-certificate-expiry cron + + should be the user that has write permission to the MUNIN_PLUGSTATE. + should be a number between 0 and 59 when the check should run every hour. + +If, for any reason, the cron script stops running, the script will revert to +uncached updates after the cache file is older than an hour. + +=head1 AUTHORS + + * Pactrick Domack (ssl_) + * Olivier Mehani (ssl-certificate-expiry) + * Martin Schobert (check for intermediate certs) + + * Copyright (C) 2013 Patrick Domack + * Copyright (C) 2017, 2019 Olivier Mehani + * Copyright (C) 2020 Martin Schobert =head1 LICENSE =cut +# shellcheck disable=SC1090 . "${MUNIN_LIBDIR}/plugins/plugin.sh" -if [ "${MUNIN_DEBUG}" = 1 ]; then +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then set -x fi HOSTPORT=${0##*ssl-certificate-expiry_} +CACHEFILE="${MUNIN_PLUGSTATE}/$(basename "${0}").cache" if [ "${HOSTPORT}" != "${0}" ] \ - && [ ! -z "${HOSTPORT}" ]; then + && [ -n "${HOSTPORT}" ]; then services="${HOSTPORT}" fi -case $1 in - config) +# Read data including a certificate from stdin and output the (fractional) number of days left +# until the expiry of this certificate. The output is empty if parsing failed. +parse_valid_days_from_certificate() { + local input_data + local valid_until_string + local valid_until_epoch + local now_epoch + local input_data + input_data=$(cat) + + if echo "$input_data" | grep -q -- "-----BEGIN CERTIFICATE-----"; then + valid_until_string=$(echo "$input_data" | openssl x509 -noout -enddate \ + | grep "^notAfter=" | cut -f 2 -d "=") + if [ -n "$valid_until_string" ]; then + # FreeBSD requires special arguments for "date" + if uname | grep -q ^FreeBSD; then + valid_until_epoch=$(date -j -f '%b %e %T %Y %Z' "$valid_until_string" +%s) + now_epoch=$(date -j +%s) + else + valid_until_epoch=$(date --date="$valid_until_string" +%s) + now_epoch=$(date +%s) + fi + if [ -n "$valid_until_epoch" ]; then + # calculate the number of days left + echo "$valid_until_epoch" "$now_epoch" | awk '{ print(($1 - $2) / (24 * 3600)); }' + fi + fi + fi +} + + +print_expire_days() { + local host="$1" + local port="$2" + local starttls="$3" + + # Wrap IPv6 addresses in square brackets + echo "$host" | grep -q ':' && host="[$host]" + + local s_client_args= + [ -n "$starttls" ] && s_client_args="-starttls $starttls" + + # We extract and check the server certificate, + # but the end date also depends on intermediate certs. Therefore + # we want to check intermediate certs as well. 
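+    #
+    # For example (hypothetical numbers, for illustration only): if the server's
+    # leaf certificate were still valid for another 60 days but an intermediate
+    # certificate in the presented chain expired in 20 days, the pipeline below
+    # would report 20, i.e. the soonest expiry among all certificates in the chain,
+    # because the per-certificate day counts are sorted numerically and only the
+    # smallest one is kept.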
+ # + # The following cryptic lines do: + # - invoke openssl and connect to a port + # - print certs, not only the server cert + # - extract each certificate as a single line + # - pipe each cert to the parse_valid_days_from_certificate + # function, which basically is 'openssl x509 -enddate' + # - get a list of the parse_valid_days_from_certificate + # results and sort them + + # shellcheck disable=SC2086 + echo "" | openssl s_client \ + -servername "$host" -connect "${host}:${port}" \ + -showcerts \ + $s_client_args 2>/dev/null | \ + awk '{ + if ($0 == "-----BEGIN CERTIFICATE-----") cert="" + else if ($0 == "-----END CERTIFICATE-----") print cert + else cert=cert$0 + }' | \ + while read -r CERT; do + (printf '\n-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----\n' "$CERT") | \ + parse_valid_days_from_certificate + done | sort -n | head -n 1 + +} + +main() { + for service in $services; do + if echo "$service" | grep -q "_"; then + host=$(echo "$service" | cut -f 1 -d "_") + port=$(echo "$service" | cut -f 2 -d "_") + starttls=$(echo "$service" | cut -f 3 -d "_") + else + host=$service + port=443 + starttls="" + fi + fieldname="$(clean_fieldname "$service")" + valid_days=$(print_expire_days "$host" "$port" "$starttls") + [ -z "$valid_days" ] && valid_days="U" + printf "%s.value %s\\n" "$fieldname" "$valid_days" + echo "${fieldname}.extinfo Last checked: $(date)" + done +} + +case ${1:-} in + config) echo "graph_title SSL Certificates Expiration" echo 'graph_args --base 1000' echo 'graph_vlabel days left' echo 'graph_category security' - echo "graph_info This graph shows the days left for the certificate" + echo "graph_info This graph shows the numbers of days before certificate expiry" for service in $services; do fieldname=$(clean_fieldname "$service") echo "${fieldname}.label $(echo "${service}" | sed 's/_/:/')" - print_thresholds "${fieldname}" + print_thresholds "${fieldname}" warning critical done + exit 0 + ;; + cron) + UPDATE="$(main)" + echo "${UPDATE}" > "${CACHEFILE}" + chmod 0644 "${CACHEFILE}" + exit 0 ;; esac -get_expire() -{ - SITE="$(echo "${1}" | sed 's/_.*//')" - PORT="$(echo "${1}" | sed 's/.*_//')" +if [ -n "$(find "${CACHEFILE}" -mmin -60 2>/dev/null)" ]; then + cat "${CACHEFILE}" + exit 0 +fi - VAR="$(clean_fieldname "$1")" - if [ "$PORT" = "$SITE" ]; then - PORT=443 - fi - if echo "$SITE" | grep -q ':'; then - # Wrap IPv6 addresses in square brackets - SITE="[${SITE}]" - fi - - CERT=$(echo "" | openssl s_client -CApath /etc/ssl/certs -servername "${SITE}" -connect "${SITE}:${PORT}" 2>/dev/null); - - if echo "${CERT}" | grep -q -- "-----BEGIN CERTIFICATE-----"; then - echo "${CERT}" \ - | openssl x509 -noout -enddate \ - | awk -F= 'BEGIN { - split("Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec", month, " "); - for (i=1; i<=12; i++) - mdigit[month[i]] = i; - } - /notAfter/ { - split($0,a,"="); split(a[2],b," "); split(b[3],time,":"); - datetime=b[4] " " mdigit[b[1]] " " b[2] " " time[1] " " time[2] " " time[3]; - days=(mktime(datetime)-systime())/86400; - print "VAR.value " days; - }' \ - | sed "s/VAR/${VAR}/g" - fi -} - -for service in $services; do - get_expire "$service" -done +main diff --git a/plugins/ssl/ssl_ b/plugins/ssl/ssl_ index 76e7686f..5c1c4fcc 100755 --- a/plugins/ssl/ssl_ +++ b/plugins/ssl/ssl_ @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # -*- sh -*- : << =cut @@ -26,15 +26,43 @@ Copyright (C) 2013 Patrick Domack =cut +# shellcheck disable=SC1090 . 
"$MUNIN_LIBDIR/plugins/plugin.sh" ARGS=${0##*ssl_} -SITE=${ARGS/_*/} -PORT=${ARGS##*_} -if [ "$PORT" = "$SITE" ]; then - PORT=443 +if echo "$ARGS" | grep -q "_"; then + SITE=$(echo "$ARGS" | cut -f 1 -d "_") + PORT=$(echo "$ARGS" | cut -f 2 -d "_") +else + SITE=$ARGS + PORT=443 fi + +# Read data including a certificate from stdin and output the (fractional) number of days left +# until the expiry of this certificate. The output is empty if parsing failed. +parse_valid_days_from_certificate() { + local input_data + local valid_until_string + local valid_until_epoch + local now_epoch + local input_data + input_data=$(cat) + if echo "$input_data" | grep -q -- "-----BEGIN CERTIFICATE-----"; then + valid_until_string=$(echo "$input_data" | openssl x509 -noout -enddate \ + | grep "^notAfter=" | cut -f 2 -d "=") + if [ -n "$valid_until_string" ]; then + valid_until_epoch=$(date --date="$valid_until_string" +%s) + if [ -n "$valid_until_epoch" ]; then + now_epoch=$(date +%s) + # calculate the number of days left + echo "$valid_until_epoch" "$now_epoch" | awk '{ print(($1 - $2) / (24 * 3600)); }' + fi + fi + fi +} + + case $1 in config) @@ -53,18 +81,7 @@ esac cert=$(echo "" | openssl s_client -CApath /etc/ssl/certs -servername "${SITE}" -connect "${SITE}:${PORT}" 2>/dev/null); -if [[ "${cert}" = *"-----BEGIN CERTIFICATE-----"* ]]; then - echo "${cert}" \ - | openssl x509 -noout -enddate \ - | awk -F= 'BEGIN { - split("Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec", month, " "); - for (i=1; i<=12; i++) - mdigit[month[i]] = i; - } - /notAfter/ { - split($0,a,"="); split(a[2],b," "); split(b[3],time,":"); - datetime=b[4] " " mdigit[b[1]] " " b[2] " " time[1] " " time[2] " " time[3]; - days=(mktime(datetime)-systime())/86400; - print "expire.value " days; - }' -fi +days_left=$(echo "$cert" | parse_valid_days_from_certificate) +[ -n "$days_left" ] || days_left="U" + +printf 'expire.value %s\n' "$days_left" diff --git a/plugins/streaming/packetship_ b/plugins/streaming/packetship_ index a9ec0964..b7465498 100755 --- a/plugins/streaming/packetship_ +++ b/plugins/streaming/packetship_ @@ -108,9 +108,7 @@ if [ "$1" = "config" ]; then # If dirty config capability is enabled then fall through # to output the data with the config information. - if [ "$MUNIN_CAP_DIRTYCONFIG" = "" ]; then - exit 0 - fi + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" != "1" ]; then exit 0; fi fi # If there are no pumps then output fake pump1 data and end. diff --git a/plugins/swap/swapspace-info b/plugins/swap/swapspace-info index 766afc36..a6d82cc3 100755 --- a/plugins/swap/swapspace-info +++ b/plugins/swap/swapspace-info @@ -44,7 +44,7 @@ http://www.brendangregg.com/K9Toolkit/swapinfo =head1 LICENSE -GPL 2. +GPL 2. =cut @@ -87,7 +87,7 @@ sub value my %VMinfo; # --- Fetch VM info --- - foreach my $count (0..12) + foreach my $count (0..12) { # # The values are counters that increment each second, here we @@ -97,16 +97,16 @@ sub value foreach my $var ("swap_avail","swap_alloc","swap_free") { $VMnow{$var} = $Kstat->{unix}->{0}->{vminfo}->{$var}; - unless ($count) + unless ($count) { $VMold{$var} = $VMnow{$var}; next; } - if (($VMnow{$var} != $VMold{$var}) && (! $VMinfo{$var})) + if (($VMnow{$var} != $VMold{$var}) && (! 
$VMinfo{$var})) { $VMinfo{$var} = $VMnow{$var} - $VMold{$var}; } - } + } select(undef, undef, undef, 0.1); $Kstat->update(); } @@ -152,7 +152,7 @@ sub config "Unalloc ", "Avail ", "\n"; - + print "Alloc.label Alloc \n"; print "Alloc.draw \n"; print "Alloc.info Swap used.\n"; diff --git a/plugins/swift/swift-dispersion b/plugins/swift/swift-dispersion index 99538b77..73f0d3c5 100755 --- a/plugins/swift/swift-dispersion +++ b/plugins/swift/swift-dispersion @@ -27,46 +27,46 @@ import json try: if sys.argv[1] == "config": - print 'graph_title Swift cluster dispersion' - print 'graph_category fs' + print('graph_title Swift cluster dispersion') + print('graph_category fs') - print 'object_missing_two.type GAUGE' - print 'object_missing_two.label Objects missing two copies' - print 'object_retries.type GAUGE' - print 'object_retries.label Objects retries' - print 'object_copies_expected.type GAUGE' - print 'object_copies_expected.label Objects copies expected' - print 'object_missing_one.type GAUGE' - print 'object_missing_one.label Objects missing one copy' - print 'object_copies_found.type GAUGE' - print 'object_copies_found.label Objects copies found' - print 'object_missing_all.type GAUGE' - print 'object_missing_all.label Objects missing all copies' - print 'object_overlapping.type GAUGE' - print 'object_overlapping.label Objects overlapping partitions' + print('object_missing_two.type GAUGE') + print('object_missing_two.label Objects missing two copies') + print('object_retries.type GAUGE') + print('object_retries.label Objects retries') + print('object_copies_expected.type GAUGE') + print('object_copies_expected.label Objects copies expected') + print('object_missing_one.type GAUGE') + print('object_missing_one.label Objects missing one copy') + print('object_copies_found.type GAUGE') + print('object_copies_found.label Objects copies found') + print('object_missing_all.type GAUGE') + print('object_missing_all.label Objects missing all copies') + print('object_overlapping.type GAUGE') + print('object_overlapping.label Objects overlapping partitions') - print 'container_missing_two.type GAUGE' - print 'container_missing_two.label Containers missing two copies' - print 'container_retries.type GAUGE' - print 'container_retries.label Containers retries' - print 'container_copies_expected.type GAUGE' - print 'container_copies_expected.label Containers copies expected' - print 'container_missing_one.type GAUGE' - print 'container_missing_one.label Containers missing one copy' - print 'container_copies_found.type GAUGE' - print 'container_copies_found.label Containers copies found' - print 'container_missing_all.type GAUGE' - print 'container_missing_all.label Containers missing all copies' - print 'container_overlapping.type GAUGE' - print 'container_overlapping.label Containers overlapping paritions' + print('container_missing_two.type GAUGE') + print('container_missing_two.label Containers missing two copies') + print('container_retries.type GAUGE') + print('container_retries.label Containers retries') + print('container_copies_expected.type GAUGE') + print('container_copies_expected.label Containers copies expected') + print('container_missing_one.type GAUGE') + print('container_missing_one.label Containers missing one copy') + print('container_copies_found.type GAUGE') + print('container_copies_found.label Containers copies found') + print('container_missing_all.type GAUGE') + print('container_missing_all.label Containers missing all copies') + print('container_overlapping.type GAUGE') + 
print('container_overlapping.label Containers overlapping paritions') sys.exit(0) except IndexError: pass -with os.popen("swift-dispersion-report -j %s" \ - % os.getenv("SWIFT_DISPERSION_CONFIG", "/etc/swift/dispersion.conf")) as report: +with os.popen("swift-dispersion-report -j %s" + % os.getenv("SWIFT_DISPERSION_CONFIG", "/etc/swift/dispersion.conf")) as report: stats = json.load(report) for type_, values in stats.iteritems(): for key, value in values.iteritems(): - print "%s_%s.value %d" % (type_, key, value) + print("%s_%s.value %d" % (type_, key, value)) diff --git a/plugins/syncthing/example-graphs/strelaysrv_-1.png b/plugins/syncthing/example-graphs/strelaysrv_-1.png new file mode 100644 index 00000000..ea26f57d Binary files /dev/null and b/plugins/syncthing/example-graphs/strelaysrv_-1.png differ diff --git a/plugins/syncthing/example-graphs/strelaysrv_-2.png b/plugins/syncthing/example-graphs/strelaysrv_-2.png new file mode 100644 index 00000000..69f583eb Binary files /dev/null and b/plugins/syncthing/example-graphs/strelaysrv_-2.png differ diff --git a/plugins/syncthing/example-graphs/strelaysrv_-3.png b/plugins/syncthing/example-graphs/strelaysrv_-3.png new file mode 100644 index 00000000..9c82f011 Binary files /dev/null and b/plugins/syncthing/example-graphs/strelaysrv_-3.png differ diff --git a/plugins/syncthing/example-graphs/strelaysrv_-4.png b/plugins/syncthing/example-graphs/strelaysrv_-4.png new file mode 100644 index 00000000..244e3eda Binary files /dev/null and b/plugins/syncthing/example-graphs/strelaysrv_-4.png differ diff --git a/plugins/syncthing/example-graphs/syncthing_-1.png b/plugins/syncthing/example-graphs/syncthing_-1.png new file mode 100644 index 00000000..a71e34d2 Binary files /dev/null and b/plugins/syncthing/example-graphs/syncthing_-1.png differ diff --git a/plugins/syncthing/example-graphs/syncthing_-2.png b/plugins/syncthing/example-graphs/syncthing_-2.png new file mode 100644 index 00000000..a333f2a5 Binary files /dev/null and b/plugins/syncthing/example-graphs/syncthing_-2.png differ diff --git a/plugins/syncthing/example-graphs/syncthing_-3.png b/plugins/syncthing/example-graphs/syncthing_-3.png new file mode 100644 index 00000000..a1704be9 Binary files /dev/null and b/plugins/syncthing/example-graphs/syncthing_-3.png differ diff --git a/plugins/syncthing/example-graphs/syncthing_-4.png b/plugins/syncthing/example-graphs/syncthing_-4.png new file mode 100644 index 00000000..41dc6637 Binary files /dev/null and b/plugins/syncthing/example-graphs/syncthing_-4.png differ diff --git a/plugins/syncthing/example-graphs/syncthing_-5.png b/plugins/syncthing/example-graphs/syncthing_-5.png new file mode 100644 index 00000000..73a640dd Binary files /dev/null and b/plugins/syncthing/example-graphs/syncthing_-5.png differ diff --git a/plugins/syncthing/strelaysrv_ b/plugins/syncthing/strelaysrv_ new file mode 100755 index 00000000..a5a31927 --- /dev/null +++ b/plugins/syncthing/strelaysrv_ @@ -0,0 +1,160 @@ +#!/bin/sh +: <<=cut +=head1 NAME +strelaysrv_ - Plugin to monitor Syncthing relay server + +=head1 DESCRIPTION +This plugin gathers metrics from a Syncthing relay server. 
+ +This plugin requires the jq utility : https://stedolan.github.io/jq/ +This plugin requires the curl utility : https://curl.haxx.se/ + +Available plugins : +strelaysrv_goroutine # +strelaysrv_num # +strelaysrv_proxied # +strelaysrv_transfer # +strelaysrv_uptime # + +=head1 CONFIGURATION +To make the plugin connect to the Syncthing relay server one has to use this type of +configuration +[strelaysrv_*] + +env.syncthing_relaysrv_host 127.0.0.1 +env.syncthing_relaysrv_port 22070 + +=head1 AUTHOR +Pierre-Alain TORET + +=head1 LICENSE +MIT +=cut + +syncthing_relaysrv_host=${syncthing_relaysrv_host:-} +syncthing_relaysrv_port=${syncthing_relaysrv_port:-} + +getstatus() { + "$CURL" -s "http://$syncthing_relaysrv_host:$syncthing_relaysrv_port/status" +} + +num() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing relay numbers +graph_category network +graph_vlabel numbers +strelaysrv_num_sessions.label sessions +strelaysrv_num_connections.label connections +strelaysrv_num_pending.label pending session keys +strelaysrv_num_proxies.label proxies +EOM + exit 0;; + *) + STATUS=$(getstatus) + NS=$(echo "$STATUS" | $JQ '.numActiveSessions ') + NC=$(echo "$STATUS" | $JQ '.numConnections ') + NK=$(echo "$STATUS" | $JQ '.numPendingSessionKeys ') + NP=$(echo "$STATUS" | $JQ '.numProxies ') + printf 'strelaysrv_num_sessions.value %s\n' "$NS" + printf 'strelaysrv_num_connections.value %s\n' "$NC" + printf 'strelaysrv_num_pending.value %s\n' "$NK" + printf 'strelaysrv_num_proxies.value %s\n' "$NP" + esac +} + +uptime() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing relay uptime +graph_vlabel uptime in seconds +graph_category network +strelaysrv_uptime.label uptime +EOM + exit 0;; + *) + STATUS=$(getstatus) + UPTIME=$(echo "$STATUS" | "$JQ" '.uptimeSeconds') + printf 'strelaysrv_uptime.value %s\n' "$UPTIME" + esac +} + +goroutine() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing relay go routines +graph_vlabel number of go routines +graph_category network +strelaysrv_goroutine.label routines +EOM + exit 0;; + *) + STATUS=$(getstatus) + GOROUTINE=$(echo "$STATUS" | "$JQ" '.goNumRoutine') + printf 'strelaysrv_goroutine.value %s\n' "$GOROUTINE" + esac +} + +proxied() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing relay total proxied bits +graph_category network +graph_vlabel bits +graph_args --base 1000 +strelaysrv_proxied.label bits +strelaysrv_proxied.cdef strelaysrv_proxied,8,* +EOM + exit 0;; + *) + STATUS=$(getstatus) + BP=$(echo "$STATUS" | "$JQ" '.bytesProxied ') + printf 'strelaysrv_proxied.value %s\n' "$BP" + esac +} + +transfer() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing relay transfer rate +graph_category network +graph_vlabel bps +graph_args --base 1000 +strelaysrv_transfer.label bps +strelaysrv_transfer.cdef strelaysrv_transfer,1000,* +EOM + exit 0;; + *) + STATUS=$(getstatus) + TRANSFER=$(echo "$STATUS" | "$JQ" '.kbps10s1m5m15m30m60m[2] ') + printf 'strelaysrv_transfer.value %s\n' "$TRANSFER" + esac +} + +cd "$(dirname "$0")" || exit + +CURL=$(which curl) +JQ=$(which jq) + +case $(basename "$0") in + strelaysrv_num) + num "$1" + exit 0;; + strelaysrv_uptime) + uptime "$1" + exit 0;; + strelaysrv_goroutine) + goroutine "$1" + exit 0;; + strelaysrv_proxied) + proxied "$1" + exit 0;; + strelaysrv_transfer) + transfer "$1" + exit 0;; +esac diff --git a/plugins/syncthing/syncthing_ b/plugins/syncthing/syncthing_ new file mode 100755 index 00000000..05efc7dd --- /dev/null +++ b/plugins/syncthing/syncthing_ @@ -0,0 +1,171 @@ 
+#!/bin/sh +# -*- sh -*- +: <<=cut +=head1 NAME +syncthing_ - Plugin to monitor Syncthing server + +=head1 DESCRIPTION +This plugin gathers metrics from a Syncthing server. + +This plugin requires the jq utility : https://stedolan.github.io/jq/ +This plugin requires the cURL utility : https://curl.haxx.se/ + +Available plugins : +syncthing_cpu # +syncthing_mem # +syncthing_goroutine # +syncthing_transfer # +syncthing_uptime # + +=head1 CONFIGURATION +To make the plugin connect to the Syncthing server one has to use this type of +configuration +[syncthing_*] + +env.syncthing_apikey myapikey0123456789 +env.syncthing_host 127.0.0.1 +env.syncthing_port 8384 +env.syncthing_proto http + +=head1 AUTHOR +Pierre-Alain TORET + +=head1 LICENSE +MIT +=cut + +syncthing_apikey=${syncthing_apikey:-} +syncthing_proto=${syncthing_proto:-} +syncthing_host=${syncthing_host:-} +syncthing_port=${syncthing_port:-} + +getstatus() { + "$CURL" -s -X GET -H "X-API-Key: $syncthing_apikey" "$syncthing_proto://$syncthing_host:$syncthing_port/rest/system/status" +} + +cpu() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing server cpu usage +graph_args -u 100 +graph_vlabel % +graph_category network +syncthing_cpu.label cpu +EOM + exit 0;; + *) + STATUS=$(getstatus) + CPU=$(echo "$STATUS" | "$JQ" '.cpuPercent') + printf 'syncthing_cpu.value %s\n' "$CPU" + esac +} + +mem() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing server memory +graph_category network +graph_order syncthing_mem_all syncthing_mem_sys +graph_args --base 1000 +graph_vlabel bits +syncthing_mem_all.label allocated +syncthing_mem_all.cdef syncthing_mem_all,8,* +syncthing_mem_sys.label obtained +syncthing_mem_sys.cdef syncthing_mem_sys,8,* +EOM + exit 0;; + *) + STATUS=$(getstatus) + ALL=$(echo "$STATUS" | "$JQ" '.alloc') + SYS=$(echo "$STATUS" | "$JQ" '.sys') + printf 'syncthing_mem_all.value %s\n' "$ALL" + printf 'syncthing_mem_sys.value %s\n' "$SYS" + esac +} + +uptime() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing server uptime +graph_vlabel uptime in seconds +graph_category network +syncthing_uptime.label uptime +EOM + exit 0;; + *) + STATUS=$(getstatus) + UPTIME=$(echo "$STATUS" | "$JQ" '.uptime') + printf 'syncthing_uptime.value %s\n' "$UPTIME" + esac +} + +goroutine() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing server go routines +graph_vlabel number of go routines +graph_category network +syncthing_goroutine.label routines +EOM + exit 0;; + *) + STATUS=$(getstatus) + GOROUTINES=$(echo "$STATUS" | "$JQ" '.goroutines') + printf 'syncthing_goroutine.value %s\n' "$GOROUTINES" + esac +} + +transfer() { + case $1 in + config) + cat <<'EOM' +graph_title Syncthing server total transfer +graph_category network +graph_order syncthing_transfer_down syncthing_transfer_up +graph_args --base 1000 +graph_vlabel bits in (-) / out (+) per ${graph_period} +syncthing_transfer_down.label received +syncthing_transfer_down.type COUNTER +syncthing_transfer_down.graph no +syncthing_transfer_down.cdef syncthing_transfer_down,8,* +syncthing_transfer_up.label bps +syncthing_transfer_up.type COUNTER +syncthing_transfer_up.negative syncthing_transfer_down +syncthing_transfer_up.cdef syncthing_transfer_up,8,* +EOM + exit 0;; + *) + CONNECTIONS=$("$CURL" -s -X GET -H "X-API-Key: $syncthing_apikey" "$syncthing_proto://$syncthing_host:$syncthing_port/rest/system/connections") + IBT=$(echo "$CONNECTIONS" | "$JQ" '.total | .inBytesTotal') + OBT=$(echo "$CONNECTIONS" | "$JQ" '.total | .outBytesTotal') + printf 
'syncthing_transfer_up.value %s\n' "$IBT" + printf 'syncthing_transfer_down.value %s\n' "$OBT" + esac +} + +cd "$(dirname "$0")" || exit + +CURL=$(which curl) +JQ=$(which jq) + +case $(basename "$0") in + syncthing_cpu) + cpu "$1" + exit 0;; + syncthing_mem) + mem "$1" + exit 0;; + syncthing_uptime) + uptime "$1" + exit 0;; + syncthing_goroutine) + goroutine "$1" + exit 0;; + syncthing_transfer) + transfer "$1" + exit 0;; +esac diff --git a/plugins/synology/snmp__synology b/plugins/synology/snmp__synology index 00f98d95..dda582d8 100755 --- a/plugins/synology/snmp__synology +++ b/plugins/synology/snmp__synology @@ -1,5 +1,4 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- +#!/usr/bin/env python3 # Copyright (C) 2014 Johann Schmitz # @@ -29,8 +28,8 @@ has been configured correctly. =head1 MAGIC MARKERS - #%# family=snmpauto - #%# capabilities=snmpconf + #%# family=snmpauto + #%# capabilities=snmpconf =head1 VERSION @@ -63,75 +62,77 @@ disktable_model = '1.3.6.1.4.1.6574.2.1.1.3' disktable_temp = '1.3.6.1.4.1.6574.2.1.1.6' sys_temperature = '1.3.6.1.4.1.6574.1.2.0' + class SynologySNMPClient(object): - def __init__(self, host, port, community): - self.hostname = host - self.transport = cmdgen.UdpTransportTarget((host, int(port))) - self.auth = cmdgen.CommunityData('test-agent', community) - self.gen = cmdgen.CommandGenerator() + def __init__(self, host, port, community): + self.hostname = host + self.transport = cmdgen.UdpTransportTarget((host, int(port))) + self.auth = cmdgen.CommunityData('test-agent', community) + self.gen = cmdgen.CommandGenerator() - def _get_disks(self): - disk_table = '1.3.6.1.4.1.6574.2.1' - errorIndication, errorStatus, errorIndex, varBindTable = self.gen.bulkCmd( - self.auth, - self.transport, - 0, 24, - disk_table) + def _get_disks(self): + disk_table = '1.3.6.1.4.1.6574.2.1' + errorIndication, errorStatus, errorIndex, varBindTable = self.gen.bulkCmd( + self.auth, + self.transport, + 0, 24, + disk_table) - if errorIndication: - logging.error("SNMP bulkCmd for devices failed: %s, %s, %s" % (errorIndication, errorStatus, errorIndex)) - return + if errorIndication: + logging.error("SNMP bulkCmd for devices failed: %s, %s, %s", + errorIndication, errorStatus, errorIndex) + return - devices = {} - for row in varBindTable: - for oid, value in row: - oid = str(oid) - if not oid.startswith(disk_table): - continue + devices = {} + for row in varBindTable: + for oid, value in row: + oid = str(oid) + if not oid.startswith(disk_table): + continue - disk_id = oid[oid.rindex('.')+1:] + disk_id = oid[oid.rindex('.') + 1:] - values = devices.get(disk_id, [None, None, None]) - if oid.startswith(disktable_id): - values[0] = str(value).strip() - if oid.startswith(disktable_model): - values[1] = str(value).strip() - if oid.startswith(disktable_temp): - values[2] = int(value) - devices[disk_id] = values + values = devices.get(disk_id, [None, None, None]) + if oid.startswith(disktable_id): + values[0] = str(value).strip() + if oid.startswith(disktable_model): + values[1] = str(value).strip() + if oid.startswith(disktable_temp): + values[2] = int(value) + devices[disk_id] = values - for x in sorted(devices.keys()): - yield tuple([x] + devices[x]) + for x in sorted(devices.keys()): + yield tuple([x] + devices[x]) - def _get_sys_temperature(self): - errorIndication, errorStatus, errorIndex, varBindTable = self.gen.getCmd( - self.auth, - self.transport, - sys_temperature) + def _get_sys_temperature(self): + errorIndication, errorStatus, errorIndex, varBindTable = self.gen.getCmd( + 
self.auth, + self.transport, + sys_temperature) - if errorIndication: - logging.error("SNMP getCmd for %s failed: %s, %s, %s" % (sys_temperature, errorIndication, errorStatus, errorIndex)) - return None + if errorIndication: + logging.error("SNMP getCmd for %s failed: %s, %s, %s", + sys_temperature, errorIndication, errorStatus, errorIndex) + return None - return int(varBindTable[0][1]) + return int(varBindTable[0][1]) - def print_config(self): - print """multigraph synology_hdd_temperatures + def print_config(self): + print("""multigraph synology_hdd_temperatures host_name {hostname} graph_title HDD temperatures on {hostname} graph_vlabel Temperature in °C graph_args --base 1000 graph_category sensors -graph_info HDD temperatures on {hostname}""".format(hostname=self.hostname) +graph_info HDD temperatures on {hostname}""".format(hostname=self.hostname)) - for id, name, model, temp in self._get_disks(): - print """disk{disk_id}.info Temperature of {name} ({model}) + for id, name, model, temp in self._get_disks(): + print("""disk{disk_id}.info Temperature of {name} ({model}) disk{disk_id}.label {name} ({model}) disk{disk_id}.type GAUGE -disk{disk_id}.min 0""".format(disk_id=id, name=name, model=model) +disk{disk_id}.min 0""".format(disk_id=id, name=name, model=model)) - - print """multigraph synology_sys_temperature + print("""multigraph synology_sys_temperature host_name {hostname} graph_title System temperatures of {hostname} graph_vlabel Temperature in °C @@ -142,15 +143,15 @@ sys_temp.info System temperature sys_temp.label Temperature sys_temp.type GAUGE sys_temp.min 0 -""".format(hostname=self.hostname) +""".format(hostname=self.hostname)) - def execute(self): - print """multigraph synology_hdd_temperatures""" - for id, name, model, temp in self._get_disks(): - print """disk{disk_id}.value {temp}""".format(disk_id=id, temp=temp) + def execute(self): + print("""multigraph synology_hdd_temperatures""") + for id, name, model, temp in self._get_disks(): + print("""disk{disk_id}.value {temp}""".format(disk_id=id, temp=temp)) - print """multigraph synology_sys_temperature""" - print "sys_temp.value {temp}".format(temp=self._get_sys_temperature()) + print("""multigraph synology_sys_temperature""") + print("sys_temp.value {temp}".format(temp=self._get_sys_temperature())) host = None @@ -159,30 +160,31 @@ community = os.getenv('community', None) debug = bool(os.getenv('MUNIN_DEBUG', os.getenv('DEBUG', 0))) if debug: - logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-7s %(message)s') + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-7s %(message)s') try: - match = re.search("^(?:|.*\/)snmp_([^_]+)_synology$", sys.argv[0]) - host = match.group(1) - match = re.search("^([^:]+):(\d+)$", host) - if match is not None: - host = match.group(1) - port = match.group(2) + match = re.search(r"^(?:|.*\/)snmp_([^_]+)_synology$", sys.argv[0]) + host = match.group(1) + match = re.search(r"^([^:]+):(\d+)$", host) + if match is not None: + host = match.group(1) + port = match.group(2) except Exception as ex: - logging.error("Caught exception: %s" % ex) + logging.error("Caught exception: %s" % ex) if "snmpconf" in sys.argv[1:]: - print "require 1.3.6.1.4.1.6574.2.1.1" - sys.exit(0) + print("require 1.3.6.1.4.1.6574.2.1.1") + sys.exit(0) else: - if not (host and port and community): - print "# Bad configuration. 
Cannot run with Host=%s, port=%s and community=%s" % (host, port, community) - sys.exit(1) + if not (host and port and community): + print("# Bad configuration. Cannot run with Host=%s, port=%s and community=%s" + % (host, port, community)) + sys.exit(1) - c = SynologySNMPClient(host, port, community) + c = SynologySNMPClient(host, port, community) - if "config" in sys.argv[1:]: - c.print_config() - else: - c.execute() + if "config" in sys.argv[1:]: + c.print_config() + else: + c.execute() diff --git a/plugins/synology/snmp__synology_hddtemp b/plugins/synology/snmp__synology_hddtemp old mode 100644 new mode 100755 index 57931c3d..17310197 --- a/plugins/synology/snmp__synology_hddtemp +++ b/plugins/synology/snmp__synology_hddtemp @@ -4,7 +4,7 @@ =head1 NAME -snmp__syno_hddtemp - Munin plugin to monitor the temperature of +snmp__syno_hddtemp - Munin plugin to monitor the temperature of harddisks in an Synology NAS. =head1 APPLICABLE SYSTEMS @@ -33,7 +33,7 @@ The temperature of each disk installed in °C. =head1 MIB INFORMATION -This plugin requires support for the synoDisk. It reports +This plugin requires support for the synoDisk. It reports the temperature of the installed disks. =head1 MAGIC MARKERS diff --git a/plugins/synology/snmp__synology_temperature b/plugins/synology/snmp__synology_temperature old mode 100644 new mode 100755 index 522dfa75..420bfbea --- a/plugins/synology/snmp__synology_temperature +++ b/plugins/synology/snmp__synology_temperature @@ -4,7 +4,7 @@ =head1 NAME -snmp__syno_temperature - Munin plugin to retrieve current temperature from a +snmp__syno_temperature - Munin plugin to retrieve current temperature from a Synology NAS. =head1 APPLICABLE SYSTEMS @@ -86,6 +86,6 @@ my $session = Munin::Plugin::SNMP->session(-translate => my $temp = $session->get_single (".1.3.6.1.4.1.6574.1.2.0") || 'ERROR'; -print "Retrived uptime is '$temp'\n" if $Munin::Plugin::SNMP::DEBUG; +print "Retrieved uptime is '$temp'\n" if $Munin::Plugin::SNMP::DEBUG; print "temp.value ", $temp, "\n"; diff --git a/plugins/synology/snmp__synology_ups b/plugins/synology/snmp__synology_ups old mode 100644 new mode 100755 index 0b1ca5cb..c975f85b --- a/plugins/synology/snmp__synology_ups +++ b/plugins/synology/snmp__synology_ups @@ -4,7 +4,7 @@ =head1 NAME -snmp__syno_ups - Munin plugin to retrieve various information of the +snmp__syno_ups - Munin plugin to retrieve various information of the UPS attached to a Synology NAS. 
=head1 APPLICABLE SYSTEMS @@ -95,8 +95,8 @@ $charge = unpack "f", reverse pack "H*", $charge; my $load = $session->get_single (".1.3.6.1.4.1.6574.4.2.12.1.0") || 'ERROR'; $load = unpack "f", reverse pack "H*", $load; -print "Retrived charge '$charge'\n" if $Munin::Plugin::SNMP::DEBUG; -print "Retrived load '$load'\n" if $Munin::Plugin::SNMP::DEBUG; +print "Retrieved charge '$charge'\n" if $Munin::Plugin::SNMP::DEBUG; +print "Retrieved load '$load'\n" if $Munin::Plugin::SNMP::DEBUG; print "charge.value ", $charge, "\n"; print "load.value ", $load, "\n"; diff --git a/plugins/syslog/syslog_ng_stats b/plugins/syslog/syslog_ng_stats old mode 100644 new mode 100755 diff --git a/plugins/system/auth b/plugins/system/auth index 7edd39ad..5782cf68 100755 --- a/plugins/system/auth +++ b/plugins/system/auth @@ -1,5 +1,5 @@ #!/bin/bash -# +# # A Munin Plugin to show auth stuff # Created by Dominik Schulz # http://developer.gauner.org/munin/ diff --git a/plugins/system/blockhosts b/plugins/system/blockhosts index 652752e5..11945533 100755 --- a/plugins/system/blockhosts +++ b/plugins/system/blockhosts @@ -2,7 +2,7 @@ # Plugin to monitor the number of hosts denied by BlockHosts # # $Log$ -# based on: +# based on: # denyhosts plugin # Revision 1.0 2009/06/05 16:00:00 tjansson # @@ -11,21 +11,20 @@ # Parameters: # config (required) # autoconf (optional - used by munin-config) - + LOG=/etc/hosts.allow if [ "$1" = "autoconf" ]; then if [ -r "$LOG" ]; then echo yes - exit 0 else echo no - exit 1 fi + exit 0 fi - + if [ "$1" = "config" ]; then - + echo 'graph_title Hosts denied by BlockHosts' echo 'graph_args -l 0' echo 'graph_vlabel denied hosts ' @@ -34,6 +33,6 @@ if [ "$1" = "config" ]; then echo 'HostsWatched.label Hosts watched by BlockHosts' exit 0 fi - + echo HostsDenied.value `egrep -c " : deny" $LOG` echo HostsWatched.value `egrep -c "#bh: ip:" $LOG` diff --git a/plugins/system/cpu_linux_multi b/plugins/system/cpu_linux_multi index 2a27f696..a3363451 100755 --- a/plugins/system/cpu_linux_multi +++ b/plugins/system/cpu_linux_multi @@ -2,18 +2,18 @@ ######################################################################## # Copyright (c) 2012, Adrien Urban # All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are -# met: -# +# met: +# # 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. +# notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the -# distribution. -# +# distribution. 
+# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -142,7 +142,6 @@ my $fields_info = [ sub pidfile() { "$ENV{MUNIN_PLUGSTATE}/munin.$plugin.pid" } sub cachefile() { "$ENV{MUNIN_PLUGSTATE}/munin.$plugin.cache" } -sub graph_section() { "system:cpu" }; sub graph_name() { "cpu_extended_multi_1s" }; sub graph_title() { "CPU usage" }; sub graph_title_all() { "Overall CPU usage" }; @@ -335,7 +334,7 @@ sub run_config() { print < (based on disk/log_sizes) =head1 LICENSE -GPLv2 +SPDX-License-Identifier: GPL-2.0-only =head1 MAGIC MARKERS @@ -32,19 +36,20 @@ GPLv2 # needs shellcheck -x /usr/share/munin/plugins/plugin.sh # shellcheck source=/usr/share/munin/plugins/plugin.sh -. "$MUNIN_LIBDIR/plugins/plugin.sh" +. "${MUNIN_LIBDIR}/plugins/plugin.sh" -NAME=$(echo "$0" | sed 's/.*_//') -TITLE=${title:-File lengths for $NAME} +NAME=$(echo "${0}" | sed 's/.*_//') +TITLE=${title:-File lengths for ${NAME}} CATEGORY=${category:-system} FILES=${files:-/var/log/messages} # we want globbing to happen here # shellcheck disable=SC2116 disable=SC2086 -FILES=$(echo $FILES) +FILES=$(echo ${FILES}) +TYPE=${type:-GAUGE} -if [ "$1" = "config" ] ; then +if [ "${1}" = "config" ] ; then # shellcheck disable=SC2154 if [ "${logarithmic}" = "1" ]; then graph_args="-o" @@ -55,20 +60,32 @@ if [ "$1" = "config" ] ; then graph_title ${TITLE} graph_args ${graph_args} --base 1000 graph_category ${CATEGORY} -graph_info This graph shows the length of ${FILES} -graph_vlabel length (lines) EOF +if [ "${TYPE}" = "GAUGE" ]; then + echo "graph_info This graph shows the length of ${FILES}" + echo "graph_vlabel length (lines)" +else + echo "graph_info This graph shows the addition of new lines in ${FILES}" + # ${graph_period} is not a shell variable + # shellcheck disable=SC2016 + echo 'graph_vlabel new lines per ${graph_period}' +fi - for F in $FILES; do - MF=$(clean_fieldname "$F") - BF=$(basename "$F") - echo "$MF.label ${BF}" + for F in ${FILES}; do + MF=$(clean_fieldname "${F}") + BF=$(basename "${F}") + echo "${MF}.label ${BF}" + echo "${MF}.type ${TYPE}" + echo "${MF}.min 0" done else - for F in $FILES; do - MF=$(echo "$F" | sed 's/[-\/\.]/_/g') - echo "$MF.value $(wc -l < "$F")" - echo "$MF.extinfo $(stat --printf="%sB\n" "$F")" + HAS_GNU_STAT=$(stat --help 2>&1| grep GNU) + for F in ${FILES}; do + MF=$(echo "${F}" | sed 's/[-\/\.]/_/g') + echo "${MF}.value $(wc -l < "${F}")" + if [ -n "${HAS_GNU_STAT}" ]; then + echo "${MF}.extinfo $(stat --printf="%sB\n" "${F}")" + fi done fi diff --git a/plugins/system/fresh-backups b/plugins/system/fresh-backups deleted file mode 100755 index e1215458..00000000 --- a/plugins/system/fresh-backups +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/sh - -: << =cut - -=head1 NAME - -fresh-backups - Plugin to monitor the freshness of backup files - -=head1 APPLICABLE SYSTEMS - -Any system with some automated backup creating or updating archive files. - -This works well with backup-manager. - -=head1 CONFIGURATION - -The following example checks all tar.bz2 files in /path/to/your/backups/, and -counts all those that are less than 2 days old, and there should be 4 separate -daily archives. 
- - [fresh-backups] - user root - env.backup_dir /path/to/your/backups/ - env.lifetime 2 - env.archive_pattern *.tar.bz2 - env.backup_number 4 - -This will also set the warning and critical values for this plugin to 2*4 and -4, respectively, meaning that if the number of fresh files goes below those -limits, the relevant notifications will be triggerred. - -An example configuration snippet for backup-manager [0] follows. - - export BM_REPOSITORY_ROOT="/path/to/your/backups" - export BM_TARBALL_FILETYPE="tar.bz2" - export BM_TARBALL_DIRECTORIES="/etc /home /srv /data" - -[0] https://github.com/sukria/Backup-Manager - -=head1 AUTHOR - -Olivier Mehani - -=head1 LICENSE - -GPLv2 - -=head1 MAGIC MARKERS - - #%# family=manual - -=cut - -# Configuration directives, edit before first use. -BACKUP_DIR=${backup_dir:-/data/backup} -ARCHIVE_PATTERN="${archive_pattern:-*.tar.bz2}" -# How old backups should be considered as non-yound anymore in [days]. -LIFETIME=${lifetime:-2} -# Critical states will be issued when the number of fresh backups archives is below `backup_number`, -# and warnings below `backup_number*lifetime` -CRIT=${backup_number:-1} -WARN=$((CRIT*LIFETIME)) - -# The situation is critical if there are no young files, the backup is down. -case $1 in - config) - cat << EOF -graph_title Fresh (<=${LIFETIME}d) backups archives in ${BACKUP_DIR} -graph_vlabel number -graph_args -l 0 -graph_category backup -freshcount.label number -freshcount.critical ${CRIT}: -freshcount.warning ${WARN}: -EOF - exit 0;; -esac - -printf "freshcount.value " -find "${BACKUP_DIR}" -name "${ARCHIVE_PATTERN}" -a -mtime "-${LIFETIME}" | wc -l -printf "freshcount.extinfo " -du -sh "${BACKUP_DIR}" diff --git a/plugins/system/fresh-backups b/plugins/system/fresh-backups new file mode 120000 index 00000000..72776f8c --- /dev/null +++ b/plugins/system/fresh-backups @@ -0,0 +1 @@ +../backup/fresh-backups \ No newline at end of file diff --git a/plugins/system/hugepages b/plugins/system/hugepages index 78eca8f5..40d1b4e3 100755 --- a/plugins/system/hugepages +++ b/plugins/system/hugepages @@ -9,7 +9,7 @@ # transparent_hugepages kernel command line parameter). All values are # shown in (KiBi/MeBi/GiBi)Bytes. # -# This plugin is used like many other munin plugins: put it in +# This plugin is used like many other munin plugins: put it in # /usr/share/munin/plugins (or another appropriate location) # and create a symlink in /etc/munin/plugins: # > ln -s /usr/share/munin/plugins/hugepages /etc/munin/plugins diff --git a/plugins/system/irq b/plugins/system/irq index 31f613e4..8b4bb67e 100755 --- a/plugins/system/irq +++ b/plugins/system/irq @@ -114,7 +114,7 @@ my $irq_types = 'sirq' => 'Software interrupts' }; -my $irq_descriptions = +my $irq_descriptions = { 'HI' => 'High priority tasklets', 'TIMER' => 'Timer bottom half', @@ -127,7 +127,7 @@ my $irq_descriptions = # ----------------- main ---------------- need_multigraph(); # -- autoconf -- -if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) +if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) { printf("%s\n", (-e "/proc/interrupts" and -e "/proc/softirqs") ? 
"yes" : "no (stats not exists)"); exit (0); @@ -309,15 +309,15 @@ sub prepare_graphs $gr->{'irq'}{'fields'}{$down_field_name}{'type'} = $fields->{'irq_sirq'}{'type'}; $gr->{'irq'}{'fields'}{$up_field_name}{'draw'} = $fields->{'irq_sirq'}{'draw'}; $gr->{'irq'}{'fields'}{$down_field_name}{'draw'} = $fields->{'irq_sirq'}{'draw'}; - + $gr->{'irq'}{'fields'}{$up_field_name}{'label'} = replace($fields->{'irq_sirq'}{'label'}, ':cpu:', $i); $gr->{'irq'}{'fields'}{$up_field_name}{'info'} = replace($fields->{'irq_sirq'}{'info'} , ':cpu:', $i); $gr->{'irq'}{'fields'}{$down_field_name}{'label'} = 'NaN'; $gr->{'irq'}{'fields'}{$down_field_name}{'info'} = 'NaN'; - + $gr->{'irq'}{'fields'}{$up_field_name}{'negative'} = $down_field_name; $gr->{'irq'}{'fields'}{$down_field_name}{'graph'} = 'no'; - + # --- child graphs --- for my $irq_type (qw(irq sirq)) { @@ -335,7 +335,7 @@ sub prepare_graphs my @irq_names = keys %{$IRQi->{'stat'}{$irq_type}{$i}}; # names split for better sorting for my $irq_name (( - (sort {int $a <=> int $b} grep{/^\d/} @irq_names), + (sort {int $a <=> int $b} grep{/^\d/} @irq_names), (sort grep{!/(^\d|ERR|MIS)/} @irq_names), (sort grep{/(ERR|MIS)/ } @irq_names) )) @@ -343,11 +343,11 @@ sub prepare_graphs my $field_name = clean_fieldname(sprintf("irq_%s", $irq_name)); append_cpu_limit($limits, $gr, $irq_type, $field_name, $graph_name, $i, $irq_name); $gr->{$graph_name}{'graph'}{'order'} .= ' '.$field_name; - for my $fo (qw(label info)) + for my $this_field (qw(label info)) { - $gr->{$graph_name}{'fields'}{$field_name}{$fo} = replace($fields->{'irq'}{$fo}, ':irq:', $irq_name); - $gr->{$graph_name}{'fields'}{$field_name}{$fo} = replace($gr->{$graph_name}{'fields'}{$field_name}{$fo}, - ':irqinfo:', + $gr->{$graph_name}{'fields'}{$field_name}{$this_field} = replace($fields->{'irq'}{$this_field}, ':irq:', $irq_name); + $gr->{$graph_name}{'fields'}{$field_name}{$this_field} = replace($gr->{$graph_name}{'fields'}{$field_name}{$this_field}, + ':irqinfo:', exists($IRQi->{'description'}{$irq_type}{$irq_name}) ? $IRQi->{'description'}{$irq_type}{$irq_name} : ''); @@ -368,7 +368,7 @@ sub print_config { printf("multigraph %s\n", $g); for my $go (sort keys %{$config->{$g}{'graph'}}) { printf("graph_%s %s\n", $go, $config->{$g}{'graph'}{$go}); } - for my $f (sort keys %{$config->{$g}{'fields'}}) { for my $fo (sort keys %{$config->{$g}{'fields'}{$f}}) { printf("%s.%s %s\n", $f, $fo, $config->{$g}{'fields'}{$f}{$fo}); } } + for my $f (sort keys %{$config->{$g}{'fields'}}) { for my $this_field (sort keys %{$config->{$g}{'fields'}{$f}}) { printf("%s.%s %s\n", $f, $this_field, $config->{$g}{'fields'}{$f}{$this_field}); } } print "\n"; } } diff --git a/plugins/system/pagefaults_by_process b/plugins/system/pagefaults_by_process index 9396d42c..22eedcd0 100755 --- a/plugins/system/pagefaults_by_process +++ b/plugins/system/pagefaults_by_process @@ -18,7 +18,7 @@ # that die may not appear on the graph, and anyway their last chunk of # CPU usage before they died is lost. You could modify this plugin to # read SAR/psacct records if you care about that. 
-# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, diff --git a/plugins/system/raminfo b/plugins/system/raminfo index d0d0f77d..4f513c42 100755 --- a/plugins/system/raminfo +++ b/plugins/system/raminfo @@ -7,7 +7,7 @@ # RAM Used Anon, Exec + Libs, Page cache # RAM Avail Free memory that can be immediately used # -# Core logic developed by Brendan Gregg. +# Core logic developed by Brendan Gregg. # REFERENCE: http://www.brendangregg.com/k9toolkit.html - the swap diagram. # # COPYRIGHT: Copyright (c) 2004 Brendan Gregg. @@ -39,7 +39,7 @@ raminfo - Plugin for monitoring memory usage =head1 AUTHOR Christian Braum, chrisi_braum@web.de - + Core logic developed by Brendan Gregg. See K9Toolkit: http://www.brendangregg.com/K9Toolkit/swapinfo @@ -103,7 +103,7 @@ sub value # get value for variables my %VMinfo; my %VMold; - foreach my $count (0..12) + foreach my $count (0..12) { # # The values are counters that increment each second, here we @@ -111,7 +111,7 @@ sub value # get value for variables # (reading them once then again a second later was not reliable). # - foreach my $var ( "freemem" ) + foreach my $var ( "freemem" ) { $VMnow{$var} = $Kstat->{unix}->{0}->{vminfo}->{$var}; unless ($count) { @@ -148,18 +148,18 @@ sub value # get value for variables my $ram_used = $pagestotal - $freemem - $ram_kernel - $ram_locked; ### format values - + my $freemem_B = sprintf( "%d ", $freemem * $PAGETOBYTE ); my $pp_kernel_B = sprintf( "%d ", $pp_kernel * $PAGETOBYTE ); my $pageslocked_B = sprintf( "%d ", $pageslocked * $PAGETOBYTE ); my $pagestotal_B = sprintf( "%d ", $pagestotal * $PAGETOBYTE ); my $ram_unusable_B = sprintf( "%d ", $ram_unusable * $PAGETOBYTE ); - my $ram_kernel_B = sprintf( "%d ", $ram_kernel * $PAGETOBYTE ); + my $ram_kernel_B = sprintf( "%d ", $ram_kernel * $PAGETOBYTE ); my $ram_locked_B = sprintf( "%d ", $ram_locked * $PAGETOBYTE ); my $ram_used_B = sprintf( "%d ", $ram_used * $PAGETOBYTE ); my $ram_total_B = sprintf( "%d ", $ram_total * $PAGETOBYTE ); - # --- assign the variables --- + # --- assign the variables --- $h_ramvalues{"Unusable.value"} = "$ram_unusable_B"; $h_ramvalues{"Kernel.value"} = "$ram_kernel_B"; $h_ramvalues{"Locked.value"} = "$ram_locked_B"; @@ -201,7 +201,7 @@ sub config # print config message and exit. "Used ", "Avail ", "\n"; - + print "Unusable.label Unusable \n"; print "Unusable.draw AREA \n"; print "Unusable.info RAM consumed by the OBP and TSBs.\n"; diff --git a/plugins/system/read_serial_temperature b/plugins/system/read_serial_temperature index 4fef783a..8329ef9e 100755 --- a/plugins/system/read_serial_temperature +++ b/plugins/system/read_serial_temperature @@ -1,8 +1,8 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Paul Wiegmans (p.wiegmans@bonhoeffer.nl) 2009 dec 18 -This munin-node plugin reads a temperature value from a serial port, +This munin-node plugin reads a temperature value from a serial port, provided by a Arduino with temperature sensor. For details see: http://amber.bonhoeffer.nl/temperatuur/ @@ -12,42 +12,39 @@ Linux: "/dev/usb/ttyUSB[n]" or "/dev/ttyUSB[n]" e.g. 
"/dev/usb/ttyUSB0" """ -import sys, serial +import sys + +import serial + # Open named port at "19200,8,N,1", 1s timeout:: - def gettemperature(): - ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1) - t = 0 - while t<1: - line = ser.readline().strip() - if line: - temp = str(line.split(" ")[0]) # temperature in tenths celsius - i = len(temp)-1 - temp = temp[:i] # return only integer value (as a string) - return temp - t += 1 - ser.close() + connection = serial.Serial('/dev/ttyUSB0', 9600, timeout=1) + # default to munin's value "unknown" + result = "U" + while True: + line = connection.readline().strip() + if line: + # temperature in tenths celsius + temp = str(line.split(" ")[0]) + # return only integer value (as a string) + result = temp[:-1] + break + connection.close() + return result -# shamelessly copied from weather_temp_ +# shamelessly copied from weather_temp_ if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - - print "yes" - + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": - - print 'graph_title Temperatuur in de serverruimte' - print 'graph_vlabel temperature in C' - print 'graph_category sensors' - print 'temperature.label temperature' - print 'graph_info Dit is de temperatuur in het rek in de serverruimte B104' - print 'graph_scale no' - # lower limit 10, upper limit 50 - print 'graph_args --base 1000 -l 10 -u 50' - + print('graph_title Temperatuur in de serverruimte') + print('graph_vlabel temperature in C') + print('graph_category sensors') + print('temperature.label temperature') + print('graph_info Dit is de temperatuur in het rek in de serverruimte B104') + print('graph_scale no') + # lower limit 10, upper limit 50 + print('graph_args --base 1000 -l 10 -u 50') else: - - print 'temperature.value %s' % gettemperature() - - + print('temperature.value %s' % gettemperature()) diff --git a/plugins/system/top-memory b/plugins/system/top-memory index 85ee490e..6dd19836 100755 --- a/plugins/system/top-memory +++ b/plugins/system/top-memory @@ -8,7 +8,7 @@ # Set categories on a bunch of plugins # # Revision 1.3 2004/05/15 21:33:30 jimmyo -# "Upped" som plugins from contrib/manual to manual or auto. +# "Upped" some plugins from contrib/manual to manual or auto. # # Revision 1.2 2004/05/09 20:42:08 jimmyo # Fixed problem with sunos/memory, when memory was reported in gigabytes (SF#930964). diff --git a/plugins/system/total_by_process_ b/plugins/system/total_by_process_ index af4b4003..d4a9b825 100755 --- a/plugins/system/total_by_process_ +++ b/plugins/system/total_by_process_ @@ -18,7 +18,7 @@ # that die may not appear on the graph, and anyway their last chunk of # CPU usage before they died is lost. You could modify this plugin to # read SAR/psacct records if you care about that. -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, @@ -32,7 +32,7 @@ $scriptname =~ s|.*/||; my $fieldname = ($scriptname =~ /^total_by_process_(.*)_(.*)/) ? $1 : undef; my $fieldtype = ($scriptname =~ /^total_by_process_(.*)_(.*)/) ? $2 : undef; -if (@ARGV and $ARGV[1] eq "suggest") +if (defined($ARGV[0]) and $ARGV[0] eq "suggest") { system("ps L | cut -d' ' -f1"); exit(0); @@ -86,7 +86,7 @@ foreach my $process (keys %total_by_process) close(PS); -if (@ARGV and $ARGV[1] == "config") +if (defined($ARGV[0]) and $ARGV[0] eq "config") { print <): + +1. Global include rules are applied on the output of C; +2. 
Global exclude rules are then applied to the result of that; +3. Then, for each state, this global output is further filtered by include, then exclude rules for the state; +4. Then the result is filtered for the given state and the remaining units counted and listed. + +An example configuration might be something like this: + +=over 2 + + [systemd_units] + env.failed_warning 0 + env.failed_critical 5 + env.inactive_warning 10 + env.inactive_critical 20 + env.exclude boring + env.inactive_exclude sleepy + env.silence_active_extinfo 1 + +=back + +In the example above, we've overridden the default warning and critical levels for failed units, added warning +and critical levels for inactive units, then filtered out boring units from all results and filtered out sleepy +units from results for the inactive state. In addition to that, only more extensive info of non-active units +are displayed in order to quickly see which units are failing and why in the webui. (By default, all extra info +about all units is displayed.) =head1 AUTHOR Olivier Mehani +Kael Shipman =head1 LICENSE @@ -30,15 +66,27 @@ GPLv2 =cut +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +failed_warning="${failed_warning:-0}" +failed_critical="${failed_critical:-10}" +silence_active_extinfo="${silence_active_extinfo:-0}" + states="active \ reloading \ inactive \ failed \ activating \ deactivating" + +include="${include:-.*}" +exclude="${exclude:-^$}" + autoconf() { which systemctl >/dev/null && \ - systemctl --state=failed --no-pager --no-legend >/dev/null 2>&1 && echo yes || echo "no (No systemctl or error running it)" + systemctl --state=failed --no-pager --no-legend >/dev/null 2>&1 && \ + echo yes || \ + echo "no (No systemctl or error running it)" } config () { @@ -52,21 +100,45 @@ EOF for state in $states; do echo "$state.label $state" echo "$state.draw AREASTACK" - if [ "$state" = "failed" ]; then - echo "$state.warning 0" - echo "$state.critical 10" - fi + print_warning $state + print_critical $state done } fetch () { - tmp=$(systemctl --no-pager --no-legend --all | awk '{print $1, $3}') + # Get all units, filtering by global include/exclude rules + local state_include_var state_include state_exclude_var state_exclude global_unit_list state_unit_list + global_unit_list=$(systemctl --no-pager --no-legend --all | grep -E "$include" | grep -Ev "$exclude" | awk '{print $1, $3}') + + # For each state, echo the number of units and some extra info, filtering for state-specific include/excludes for state in $states ; do - count=$(echo "$tmp" | grep -c "$state$") + # Get state-specific include/excludes, if present + state_include_var="${state}_include" + state_include="${!state_include_var}" + state_exclude_var="${state}_exclude" + state_exclude="${!state_exclude_var}" + state_unit_list="$global_unit_list" + + # Filter + if [ -n "$state_include" ]; then + state_unit_list="$(echo "$state_unit_list" | grep -E "$state_include")" + fi + if [ -n "$state_exclude" ]; then + state_unit_list="$(echo "$state_unit_list" | grep -Ev "$state_exclude")" + fi + + # Count and output + count=$(echo "$state_unit_list" | grep -c "$state$") echo "$state.value $count" - extinfo=$(echo "$tmp" | grep "$state$" | cut -d " " -f 1 | tr '\n' ' ') - if [ -n "$extinfo" ]; then - echo "$state.extinfo" "$extinfo" + extinfo=$(echo "$state_unit_list" | grep "$state$" | cut -d " " -f 1 | tr '\n' ' ') + if [ "$silence_active_extinfo" = "0" ]; then + if [ -n "$extinfo" ]; then + echo "$state.extinfo" "$extinfo" + fi + else + if [ -n "$extinfo" ] && [ "$state" != "active" ]; 
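The systemd_units documentation above describes a four-step filter order: global include applied to the systemctl output, global exclude, then per-state include and exclude before the remaining units are counted. A simplified standalone sketch of that order — the unit names, states and regexes below are invented for illustration and mirror the boring/sleepy example configuration:

    import re

    # Invented (unit, state) pairs standing in for "systemctl --no-pager --no-legend --all" output.
    units = [
        ("nginx.service", "active"),
        ("boring-job.timer", "active"),
        ("sleepy.service", "inactive"),
        ("backup.service", "inactive"),
        ("mail.service", "failed"),
    ]

    include, exclude = r".*", r"boring"              # steps 1 and 2: env.include / env.exclude
    state_rules = {"inactive": (None, r"sleepy")}    # step 3: env.<state>_include / env.<state>_exclude

    filtered = [(u, s) for u, s in units
                if re.search(include, u) and not re.search(exclude, u)]

    for state in ("active", "inactive", "failed"):
        state_include, state_exclude = state_rules.get(state, (None, None))
        remaining = [u for u, s in filtered
                     if s == state
                     and (state_include is None or re.search(state_include, u))
                     and (state_exclude is None or not re.search(state_exclude, u))]
        # step 4: count (and, in the real plugin, list) what is left per state
        print("{}.value {}".format(state, len(remaining)))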
then + echo "$state.extinfo" "$extinfo" + fi fi done } diff --git a/plugins/systemd/timesync_status b/plugins/systemd/timesync_status new file mode 100755 index 00000000..0b200c60 --- /dev/null +++ b/plugins/systemd/timesync_status @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +import re +import subprocess +import sys +import textwrap + + +''' +=head1 NAME + +timesync_status - monitor ntp status with systemd-timesyncd + +=head1 APPLICABLE SYSTEMS + +All systems using systemd-timesyncd as its NTP-client. However, this +plugin itself also needs Python 3.5+ to call subprocess.run. + +=head1 CONFIGURATION + +This plugin should work out-of-the-box with autoconf. It does expect +timedatectl to be on $PATH, but that should always be the case in a +normal system. + +=head1 INTERPRETATION + +This plugin shows a graph with one line for every NTP metric it measure. +Metrics are shown with their usual name, and are explained in their +respective info fields. + +This plugin issues no warnings or critical states. + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf + +=head1 VERSION + +1.0 + +=head1 AUTHOR + +Bert Peters + +=head1 LICENSE + +GPLv2 +''' + + +def parse_time(value): + value = value.strip() + if ' ' in value: + return sum(parse_time(x) for x in value.split(' ')) + + match = re.match(r'^([+-]?[0-9.]+)([a-z]+)$', value) + if not match: + raise ValueError('Invalid time ' + value) + + value = float(match.group(1)) + suffix = match.group(2) + + if suffix == 'min': + value *= 60 + elif suffix == 'ms': + value /= 1000 + elif suffix == 'us': + value /= 1e6 + + return value + + +def parse_response(data): + values = {} + for line in data.splitlines(): + k, v = line.split(': ', 1) + values[k.strip()] = v.strip() + + return values + + +def retrieve(): + result = subprocess.run(['timedatectl', 'timesync-status'], capture_output=True) + if result.returncode != 0: + sys.exit('timedatectl failed') + + output = result.stdout.decode('utf-8') + values = parse_response(output) + + print('offset.value', parse_time(values['Offset'])) + print('delay.value', parse_time(values['Delay'])) + print('delay.extinfo', 'Server', values['Server']) + print('jitter.value', parse_time(values['Jitter'])) + print('poll.value', parse_time(values['Poll interval'].split('(')[0])) + + +def autoconf(): + result = subprocess.run(['timedatectl', 'status'], capture_output=True) + if result.returncode != 0: + print('no (failed to run timedatectl)') + return + + values = parse_response(result.stdout.decode('utf-8')) + if values['NTP service'] == 'active': + print('yes') + else: + print('no (ntp service not running)') + + +def config(): + print(textwrap.dedent('''\ + graph_title Timesync status + graph_vlabel s + graph_category time + + offset.label Offset + offset.info Time difference between source and local + + delay.label Delay + delay.info Roundtrip time to the NTP-server + + jitter.label Jitter + jitter.info Difference in offset between two subsequent samples + + poll.label Polling time + poll.info Time between two subsequent NTP-polls + ''')) + + +if __name__ == '__main__': + if len(sys.argv) == 1: + retrieve() + elif sys.argv[1] == 'config': + config() + elif sys.argv[1] == 'autoconf': + autoconf() diff --git a/plugins/tarsnap/README.md b/plugins/tarsnap/README.md index b66af942..f0d6de18 100644 --- a/plugins/tarsnap/README.md +++ b/plugins/tarsnap/README.md @@ -8,18 +8,18 @@ This plugin creates two graphs: * *tarsnap_total* - summarising the total amount of data the local tarsnap instance has stored on the service 
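The parse_time helper in the timesync_status plugin above reduces timedatectl's suffixed durations to seconds: space-separated parts are summed, "min" is multiplied by 60, "ms" and "us" are divided down, and plain seconds pass through. A standalone re-implementation of those rules, fed with made-up sample values, shows the effect:

    import re

    def to_seconds(value):
        """Convert a timedatectl-style duration such as '1min 30.5s' or '+32.7ms' to seconds."""
        value = value.strip()
        if " " in value:
            return sum(to_seconds(part) for part in value.split(" "))
        number, suffix = re.match(r"^([+-]?[0-9.]+)([a-z]+)$", value).groups()
        factor = {"min": 60, "s": 1, "ms": 1e-3, "us": 1e-6}[suffix]
        return float(number) * factor

    # Sample values are invented, not captured from a real timedatectl run.
    for sample in ("+32.7ms", "1min 30.5s", "-412us"):
        print(sample, "->", to_seconds(sample))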
(total and compressed). * *tarsnap_unique* - summarising the total amount of unique (deduplicated) -data the local tarsnap instance has stored on the service (total and +data the local tarsnap instance has stored on the service (total and compressed). The compressed value here is the actual amount of data stored on the tarnap servers and what tarsnap uses for billing. Usage ----- -Add the following to your backup script (after tarsnap has run), or to a +Add the following to your backup script (after tarsnap has run), or to a cron job: /usr/local/bin/tarsnap --print-stats > /path/to/tarsnap-stats.txt - + N.B.: ensure `/path/to/munin-stats.txt` is readable by munin-node. Configuration diff --git a/plugins/tarsnap/tarsnap b/plugins/tarsnap/tarsnap index a5633cff..006d54ce 100755 --- a/plugins/tarsnap/tarsnap +++ b/plugins/tarsnap/tarsnap @@ -3,14 +3,14 @@ # Munin plugin for Tarsnap # # https://github.com/warrenguy/munin-tarsnap -# +# # USAGE: # # Add the following to your backup script (after tarsnap has run), or to a # cron job: # # /usr/local/bin/tarsnap --print-stats > /path/to/tarsnap-stats.txt -# +# # N.B.: ensure /path/to/munin-stats.txt is readable by munin-node. The # default path this script tries is /var/lib/munin/tarsnap-stats.txt # @@ -37,7 +37,7 @@ case $1 in multigraph tarsnap_total graph_title Tarsnap total data graph_vlabel bytes -graph_category backup +graph_category backup total_size.label Total size total_compressed.label Total size (compressed) diff --git a/plugins/tcp/tcp-states b/plugins/tcp/tcp-states index eb093263..b83dd257 100755 --- a/plugins/tcp/tcp-states +++ b/plugins/tcp/tcp-states @@ -21,17 +21,15 @@ if [ "$1" = "autoconf" ]; then if ( netstat -nt 2>/dev/null >/dev/null ); then echo yes - exit 0 else if [ $? -eq 127 ] then echo "no (netstat program not found)" - exit 1 else echo no - exit 1 fi fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/teamspeak/teamspeak_user b/plugins/teamspeak/teamspeak_user index a2490e8d..beba06ea 100755 --- a/plugins/teamspeak/teamspeak_user +++ b/plugins/teamspeak/teamspeak_user @@ -26,9 +26,9 @@ use Net::Telnet; # CONFIG HERE! my $hostname = "localhost"; # serveraddress my $port = 10011; # querryport (default: 10011) -my @serverids = (1); # array of virtualserverids (1,2,3,4,...) +my @serverids = (1); # array of virtualserverids (1,2,3,4,...) -my $username = ""; # only set if the default queryuser hasnt enough rights (should work without this) +my $username = ""; # only set if the default queryuser hasn't enough rights (should work without this) my $password = ""; # SCRIPT START! 
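The teamspeak_user plugin talks to the TeamSpeak 3 ServerQuery interface on the query port configured above: it selects a virtual server with "use sid=N", requests "serverinfo", and reads until the "error id=0 msg=ok" terminator. The response is one long line of key=value pairs in which spaces inside values are escaped as "\s". A small parsing sketch against an invented response fragment (field names are taken from the plugin's regexes, values are made up):

    import re

    # Invented serverinfo fragment; a real response is a single space-separated
    # key=value line with "\s" standing in for spaces inside values.
    sample = ("virtualserver_id=1 virtualserver_name=My\\sTS3\\sServer "
              "virtualserver_welcomemessage=Hi virtualserver_clientsonline=7")

    name = re.search(r"virtualserver_name=(\S+)", sample).group(1).replace("\\s", " ")
    clients = int(re.search(r"virtualserver_clientsonline=(\d+)", sample).group(1))
    print("{}: {} clients online (query clients included)".format(name, clients))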
@@ -47,11 +47,11 @@ if(exists $ARGV[0] and $ARGV[0] eq "config") exit 0; } else -{ - my $telnet = new Net::Telnet(Timeout=>5, Errmode=>"return", Prompt=>"/\r/"); +{ + my $telnet = new Net::Telnet(Timeout=>5, Errmode=>"return", Prompt=>"/\r/"); if (!$telnet->open(Host=>$hostname, Port=>$port)) { die exit; - } + } $telnet->waitfor("/Welcome to the TeamSpeak 3 ServerQuery interface/"); foreach my $server (@serverids) { @@ -62,7 +62,7 @@ else $telnet->waitfor("/error id=0 msg=ok/"); } $telnet->cmd("serverinfo"); - + my $clients = 0; my $queryclients = 0; my $line = $telnet->getline(Timeout=>5); @@ -74,7 +74,7 @@ else } $telnet->waitfor("/error id=0 msg=ok/"); print "$server.value ".($clients - $queryclients)."\n"; - } + } $telnet->close; } exit; diff --git a/plugins/teamspeak/ts3v2_ b/plugins/teamspeak/ts3v2_ index be251358..91cb9251 100755 --- a/plugins/teamspeak/ts3v2_ +++ b/plugins/teamspeak/ts3v2_ @@ -1,220 +1,220 @@ -#!/usr/bin/perl -w -# ts3v2_ (perl) -# Munin Plugin for Teamspeak3 Servers -# displays the number of connected users on TS3 servers -# and average transferrate per second over 5 min. -# -# You can use it with symlinks for overview and explicit ids. -# Also you can configure the following variables: -# host -# port -# -# by Marc Urben, www.oxi.ch or www.oom.ch -# -# Based on Tim Wulkau's script. Thank you! -# www.wulkau.de -# -####################################################### -# 02.10.10 - v0.3 -# -now works again with 3.0.0-beta29 -# -# 18.04.10 - v0.2 -# -transfer mode added -# -# 13.04.10 - v0.1 -# -initial release -# -###################################################### - -#%# family=auto -#%# capabilities=autoconf suggest - -use strict; -use Net::Telnet; - -# CONFIG HERE! -my $hostname = $ENV{host} || "localhost"; # serveraddress -my $port = $ENV{port} || 10011; # querryport -my $user = $ENV{user} || "serveradmin"; -my $password = $ENV{password} || ""; -my $filename = "ts3v2_"; - -# SCRIPT START! -#init telnet connection -my $name = ""; my $server = ""; my @num; my $id = 0; my $i = 0; my $myserver; my $transfer; -my $telnet = new Net::Telnet(Timeout=>1, Errmode=>"return", Prompt=>"/\r/"); -if (!$telnet->open(Host=>$hostname, Port=>$port)) { - die "Server could not be reached, please check your config!"; -} - -$telnet->waitfor("/Welcome/"); - -if ($password ne "") { - $telnet->cmd("login client_login_name=".$user." 
client_login_password=".$password); - my $response = $telnet->getline(Timeout=>1); - if ($response !~ "error id=0 msg=ok") { - $telnet->close; - die "ServerQuery login failed: ".$response; - } -} - -#get argument -my $argument = ""; -if ($0 =~ /$filename(\w+)$/i) { - $argument = $1; -} elsif ($ARGV[0] ne "suggest" and $ARGV[0] ne "autoconf") { - die "Error: We need to know what serverid you want, so link this plugin as " - .$filename."1, ".$filename."2 or ".$filename."overview.\n"; -} - -#check for overview, transfer or single server mode -my $mode = ""; -if ($argument eq "overview" or $argument eq "transfer" or (defined $ARGV[0] and ($ARGV[0] eq "suggest" or $ARGV[0] eq "autoconf"))) { - if ($argument eq "overview") { - $mode = "o"; - } elsif($argument eq "transfer") { - $mode = "t"; - } - $telnet->cmd("serverlist"); - my $line = $telnet->getline(Timeout=>1); - my @servers = split(/\|/, $line); - foreach (@servers) { - if ($_ =~ m/virtualserver_id=(\d+) virtual/) { - push(@num, $1); - } else { - die "ERROR: server string not recognized!\n"; - } - } - $telnet->waitfor("/error id=0 msg=ok/"); -} elsif ($argument =~ /^[+-]?\d+$/ ) { - $mode = "s"; - $server = $argument; -} else { - die "ERROR: unknown plugin mode: $argument\n"; -} - -#check for config mode -if (exists $ARGV[0] and $ARGV[0] eq "autoconf") { - print "yes"; - exit 0; -} elsif (exists $ARGV[0] and $ARGV[0] eq "suggest") { - print "overview\n"; - print "transfer\n"; - foreach (@num) { - print $_."\n"; - } - exit 0; -} elsif (exists $ARGV[0] and $ARGV[0] eq "config") { - if ($mode eq "s") { - #single server mode - $telnet->cmd("use sid=".$server); - $telnet->waitfor("/error id=0 msg=ok/"); - $telnet->cmd("serverinfo"); - my $line = $telnet->getline(Timeout=>1); - - if ($line =~ m/virtualserver_id=(\d+) virtual/) { - my $id = $1; - } else { - die "ERROR: server string not recognized!\n"; - } - if ($line =~ m/virtualserver_name=(.*) virtualserver_welcomemessage/) { - $name = $1; - $name =~ s/\\s/ /g; - } else { - die "ERROR: server string not recognized!\n"; - } - $telnet->waitfor("/error id=0 msg=ok/"); - - print "graph_title Teamspeak Users ".$name."\n"; - print "graph_vlabel Connected Teamspeak Users\n"; - print "graph_category voip\n"; - print "graph_info This graph shows the number of connected users on a Teamspeak3 server\n"; - print "users.label Users\n"; - print "users.info Connected users to ".$name."\n"; - print "users.type GAUGE\n"; - - exit 0; - } elsif ($mode eq "o") { - #overview mode - print "graph_title Teamspeak Users Overview\n"; - print "graph_vlabel Connected Teamspeak Users\n"; - print "graph_category voip\n"; - print "graph_info This graph shows the number of connected users on a Teamspeak3 server\n"; - - foreach (@num) { - $telnet->cmd("use sid=".$_); - $telnet->waitfor("/error id=0 msg=ok/"); - $telnet->cmd("serverinfo"); - my $line = $telnet->getline(Timeout=>1); - if ($line =~ m/virtualserver_name=(.*) virtualserver_welcomemessage/) { - $name = $1; - $name =~ s/\\s/ /g; - } else { - die "ERROR: server string not recognized!\n"; - } - $telnet->waitfor("/error id=0 msg=ok/"); - print $_.".label ".$name."\n"; - print $_.".info Users connected on ".$name."\n"; - print $_.".type GAUGE\n"; - } - exit 0; - } elsif ($mode eq "t") { - #transfer mode - print "graph_title Teamspeak Transfer Overview\n"; - print "graph_vlabel Teamspeak Transfer\n"; - print "graph_category voip\n"; - print "graph_info This graph shows the Teamspeak3 Transfer Overview\n"; - print "transfer.label ~ Transfer per second\n"; - print 
"transfer.info Transfer per second over 5 min\n"; - print "transfer.type DERIVE\n"; - exit 0; - } -} else { - #go go magic, go! - if ($mode eq "s") { - #single mode - $telnet->cmd("use sid=".$server); - $telnet->waitfor("/error id=0 msg=ok/"); - $telnet->cmd("serverinfo"); - - my $line = $telnet->getline(Timeout=>1); - if ($line =~ m/virtualserver_clientsonline=(\d+) /) { - print "users.value ".($1-1)."\n"; - } else { - print "users.value 0\n"; - } - $telnet->waitfor("/error id=0 msg=ok/"); - } elsif ($mode eq "o") { - #overview mode - for (@num) { - $telnet->waitfor("/Welcome/"); - $telnet->cmd("use sid=".$_); - $telnet->waitfor("/error id=0 msg=ok/"); - $telnet->cmd("serverinfo"); - - my $line = $telnet->getline(Timeout=>1); - if ($line =~ m/virtualserver_clientsonline=(\d+) /) { - print $_.".value ".($1-1)."\n"; - } else { - print $_.".value 0\n"; - } - $telnet->waitfor("/error id=0 msg=ok/"); - } - } elsif ($mode eq "t") { - $telnet->cmd("hostinfo"); - my @tsave; - my $line = $telnet->getline(Timeout=>1); - if ($line =~ m/connection_bytes_received_total=(\d+) connection_bandwidth_sent_last_second_total/) { - $transfer = $1; - print "transfer.value ".(($transfer-($transfer%300))/300)."\n"; - } else { - die "ERROR: server string not recognized!\n"; - } - $telnet->waitfor("/error id=0 msg=ok/"); - - } -} - -#close telnet connection -$telnet->close; -exit; +#!/usr/bin/perl -w +# ts3v2_ (perl) +# Munin Plugin for Teamspeak3 Servers +# displays the number of connected users on TS3 servers +# and average transferrate per second over 5 min. +# +# You can use it with symlinks for overview and explicit ids. +# Also you can configure the following variables: +# host +# port +# +# by Marc Urben, www.oxi.ch or www.oom.ch +# +# Based on Tim Wulkau's script. Thank you! +# www.wulkau.de +# +####################################################### +# 02.10.10 - v0.3 +# -now works again with 3.0.0-beta29 +# +# 18.04.10 - v0.2 +# -transfer mode added +# +# 13.04.10 - v0.1 +# -initial release +# +###################################################### + +#%# family=auto +#%# capabilities=autoconf suggest + +use strict; +use Net::Telnet; + +# CONFIG HERE! +my $hostname = $ENV{host} || "localhost"; # serveraddress +my $port = $ENV{port} || 10011; # querryport +my $user = $ENV{user} || "serveradmin"; +my $password = $ENV{password} || ""; +my $filename = "ts3v2_"; + +# SCRIPT START! +#init telnet connection +my $name = ""; my $server = ""; my @num; my $id = 0; my $i = 0; my $myserver; my $transfer; +my $telnet = new Net::Telnet(Timeout=>1, Errmode=>"return", Prompt=>"/\r/"); +if (!$telnet->open(Host=>$hostname, Port=>$port)) { + die "Server could not be reached, please check your config!"; +} + +$telnet->waitfor("/Welcome/"); + +if ($password ne "") { + $telnet->cmd("login client_login_name=".$user." 
client_login_password=".$password); + my $response = $telnet->getline(Timeout=>1); + if ($response !~ "error id=0 msg=ok") { + $telnet->close; + die "ServerQuery login failed: ".$response; + } +} + +#get argument +my $argument = ""; +if ($0 =~ /$filename(\w+)$/i) { + $argument = $1; +} elsif ($ARGV[0] ne "suggest" and $ARGV[0] ne "autoconf") { + die "Error: We need to know what serverid you want, so link this plugin as " + .$filename."1, ".$filename."2 or ".$filename."overview.\n"; +} + +#check for overview, transfer or single server mode +my $mode = ""; +if ($argument eq "overview" or $argument eq "transfer" or (defined $ARGV[0] and ($ARGV[0] eq "suggest" or $ARGV[0] eq "autoconf"))) { + if ($argument eq "overview") { + $mode = "o"; + } elsif($argument eq "transfer") { + $mode = "t"; + } + $telnet->cmd("serverlist"); + my $line = $telnet->getline(Timeout=>1); + my @servers = split(/\|/, $line); + foreach (@servers) { + if ($_ =~ m/virtualserver_id=(\d+) virtual/) { + push(@num, $1); + } else { + die "ERROR: server string not recognized!\n"; + } + } + $telnet->waitfor("/error id=0 msg=ok/"); +} elsif ($argument =~ /^[+-]?\d+$/ ) { + $mode = "s"; + $server = $argument; +} else { + die "ERROR: unknown plugin mode: $argument\n"; +} + +#check for config mode +if (exists $ARGV[0] and $ARGV[0] eq "autoconf") { + print "yes"; + exit 0; +} elsif (exists $ARGV[0] and $ARGV[0] eq "suggest") { + print "overview\n"; + print "transfer\n"; + foreach (@num) { + print $_."\n"; + } + exit 0; +} elsif (exists $ARGV[0] and $ARGV[0] eq "config") { + if ($mode eq "s") { + #single server mode + $telnet->cmd("use sid=".$server); + $telnet->waitfor("/error id=0 msg=ok/"); + $telnet->cmd("serverinfo"); + my $line = $telnet->getline(Timeout=>1); + + if ($line =~ m/virtualserver_id=(\d+) virtual/) { + my $id = $1; + } else { + die "ERROR: server string not recognized!\n"; + } + if ($line =~ m/virtualserver_name=(.*) virtualserver_welcomemessage/) { + $name = $1; + $name =~ s/\\s/ /g; + } else { + die "ERROR: server string not recognized!\n"; + } + $telnet->waitfor("/error id=0 msg=ok/"); + + print "graph_title Teamspeak Users ".$name."\n"; + print "graph_vlabel Connected Teamspeak Users\n"; + print "graph_category voip\n"; + print "graph_info This graph shows the number of connected users on a Teamspeak3 server\n"; + print "users.label Users\n"; + print "users.info Connected users to ".$name."\n"; + print "users.type GAUGE\n"; + + exit 0; + } elsif ($mode eq "o") { + #overview mode + print "graph_title Teamspeak Users Overview\n"; + print "graph_vlabel Connected Teamspeak Users\n"; + print "graph_category voip\n"; + print "graph_info This graph shows the number of connected users on a Teamspeak3 server\n"; + + foreach (@num) { + $telnet->cmd("use sid=".$_); + $telnet->waitfor("/error id=0 msg=ok/"); + $telnet->cmd("serverinfo"); + my $line = $telnet->getline(Timeout=>1); + if ($line =~ m/virtualserver_name=(.*) virtualserver_welcomemessage/) { + $name = $1; + $name =~ s/\\s/ /g; + } else { + die "ERROR: server string not recognized!\n"; + } + $telnet->waitfor("/error id=0 msg=ok/"); + print $_.".label ".$name."\n"; + print $_.".info Users connected on ".$name."\n"; + print $_.".type GAUGE\n"; + } + exit 0; + } elsif ($mode eq "t") { + #transfer mode + print "graph_title Teamspeak Transfer Overview\n"; + print "graph_vlabel Teamspeak Transfer\n"; + print "graph_category voip\n"; + print "graph_info This graph shows the Teamspeak3 Transfer Overview\n"; + print "transfer.label ~ Transfer per second\n"; + print 
"transfer.info Transfer per second over 5 min\n"; + print "transfer.type DERIVE\n"; + exit 0; + } +} else { + #go go magic, go! + if ($mode eq "s") { + #single mode + $telnet->cmd("use sid=".$server); + $telnet->waitfor("/error id=0 msg=ok/"); + $telnet->cmd("serverinfo"); + + my $line = $telnet->getline(Timeout=>1); + if ($line =~ m/virtualserver_clientsonline=(\d+) /) { + print "users.value ".($1-1)."\n"; + } else { + print "users.value 0\n"; + } + $telnet->waitfor("/error id=0 msg=ok/"); + } elsif ($mode eq "o") { + #overview mode + for (@num) { + $telnet->waitfor("/Welcome/"); + $telnet->cmd("use sid=".$_); + $telnet->waitfor("/error id=0 msg=ok/"); + $telnet->cmd("serverinfo"); + + my $line = $telnet->getline(Timeout=>1); + if ($line =~ m/virtualserver_clientsonline=(\d+) /) { + print $_.".value ".($1-1)."\n"; + } else { + print $_.".value 0\n"; + } + $telnet->waitfor("/error id=0 msg=ok/"); + } + } elsif ($mode eq "t") { + $telnet->cmd("hostinfo"); + my @tsave; + my $line = $telnet->getline(Timeout=>1); + if ($line =~ m/connection_bytes_received_total=(\d+) connection_bandwidth_sent_last_second_total/) { + $transfer = $1; + print "transfer.value ".(($transfer-($transfer%300))/300)."\n"; + } else { + die "ERROR: server string not recognized!\n"; + } + $telnet->waitfor("/error id=0 msg=ok/"); + + } +} + +#close telnet connection +$telnet->close; +exit; diff --git a/plugins/teamspeak/tsuser b/plugins/teamspeak/tsuser index 30ea2e07..39507773 100755 --- a/plugins/teamspeak/tsuser +++ b/plugins/teamspeak/tsuser @@ -1,5 +1,5 @@ #!/usr/bin/perl -# +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 dated June, @@ -68,7 +68,7 @@ elsif ( exists $ARGV[0] and $ARGV[0] eq "autoconf" ) } } } -else +else { foreach my $server (@uports) { @@ -76,7 +76,7 @@ else print $FS "si ".$server, "\n\n"; my $MASK = $string."=*"; - while(<$FS>) + while(<$FS>) { my $input_line = $_; if ( $input_line =~ m/($MASK)/ ) diff --git a/plugins/thecus/snmp__thecus_fans b/plugins/thecus/snmp__thecus_fans index ec43e277..27f237b3 100755 --- a/plugins/thecus/snmp__thecus_fans +++ b/plugins/thecus/snmp__thecus_fans @@ -3,7 +3,7 @@ =head1 NAME -snmp__thecus_fans - Munin plugin to retrive fanspeed readings from a Thecus +snmp__thecus_fans - Munin plugin to retrieve fanspeed readings from a Thecus NAS device running SNMP. =head1 APPLICABLE SYSTEMS @@ -98,7 +98,7 @@ my $session = Munin::Plugin::SNMP->session(-translate => my $fan1 = $session->get_single ("1.3.6.1.4.1.14822.101.21.5200.1.1.0") || 'U'; my $fan2 = $session->get_single ("1.3.6.1.4.1.14822.101.21.5200.1.2.0") || 'U'; -#print "Retrived uptime is '$uptime'\n" if $DEBUG; +#print "Retrieved uptime is '$uptime'\n" if $DEBUG; print "fan1.value ", $fan1, "\n"; print "fan2.value ", $fan2, "\n"; diff --git a/plugins/thin/thin_memory b/plugins/thin/thin_memory index 580ee91e..f1557fc4 100755 --- a/plugins/thin/thin_memory +++ b/plugins/thin/thin_memory @@ -1,100 +1,102 @@ #!/usr/bin/env ruby -# thin_memory - A munin plugin for Linux to monitor memory size of each individual thin process -# -# For Linux ONLY ! -# DOES NOT WORK on OSX, Solaris or BSD. 
-# only linux, because this script relies on proc filesystem -# -# Original author: -# Frederico de Souza Araujo - fred.the.master@gmail.com -# http://www.frederico-araujo.com -# -# Usurper: -# Adam Michel - elfurbe@furbism.com -# http://www.furbism.com -# -# Originally based on: -# thin_process_memory - -# A munin plugin to monitor memory size of -# each individual thin process -# by Ben VandenBos and Avvo, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# + +=begin + +thin_memory - A munin plugin for Linux to monitor memory size of each individual thin process + +For Linux ONLY ! +DOES NOT WORK on OSX, Solaris or BSD. +only linux, because this script relies on proc filesystem + +Original author: + Frederico de Souza Araujo - fred.the.master@gmail.com + http://www.frederico-araujo.com + +Usurper: + Adam Michel - elfurbe@furbism.com + http://www.furbism.com + +Originally based on: + thin_process_memory - + A munin plugin to monitor memory size of + each individual thin process + by Ben VandenBos and Avvo, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ #%# family=auto #%# capabilities=autoconf +=end + module Munin class ThinProcessMemory - # run main method def run - instances = get_pids() + instances = get_pids instances.each do |instance| - pid, port = instance.split("|") - rss = (pid_rss(pid).to_i)/1024 + pid, port = instance.split('|') + rss = pid_rss(pid).to_i / 1024 puts "thin_#{port}.value #{rss}" end end - + # only get the memory for each pid def pid_rss(pid) res = `grep "VmRSS" /proc/#{pid}/status`.split[1] - if res.match("cannot access") - return nil + if res.match('cannot access') + nil else - return res + res end end - + # fetch all pids that match thin def get_pids pids = `pgrep -f 'thin' -l | awk -F " " '{ if (substr( $4, 10, 4)>=1) print $1"|"substr( $4, 10, 4)}' | sort -t'|' -nk 2`.split(/\r?\n/) end def autoconf - get_pids().length > 0 + get_pids.length > 0 end - end end mpm = Munin::ThinProcessMemory.new case ARGV[0] -when "config" - puts "graph_title Thin Memory" - puts "graph_vlabel RSS" - puts "graph_category webserver" - puts "graph_args --base 1024 -l 0" - puts "graph_scale yes" - puts "graph_info Tracks the size of individual thin processes" +when 'config' + puts 'graph_title Thin Memory' + puts 'graph_vlabel RSS' + puts 'graph_category webserver' + puts 'graph_args --base 1024 -l 0' + puts 'graph_scale yes' + puts 'graph_info Tracks the size of individual thin processes' mpm.get_pids.each do |instance| - pid, port = instance.split("|") + pid, port = instance.split('|') puts "thin_#{port}.label thin_#{port}" puts "thin_#{port}.info Process memory" puts "thin_#{port}.type GAUGE" puts "thin_#{port}.min 0" end -when "autoconf" +when 'autoconf' if mpm.autoconf - puts "yes" + puts 'yes' exit 0 end - puts "no" - exit 1 + puts 'no' + exit 0 else mpm.run end diff --git a/plugins/thin/thin_threads b/plugins/thin/thin_threads index d559bd08..224edbf2 100755 --- a/plugins/thin/thin_threads +++ b/plugins/thin/thin_threads @@ -1,104 +1,106 @@ #!/usr/bin/env ruby -# thin_threads - -# A munin plugin for Linux to monitor how many threads per thin process -# -# For Linux ONLY ! -# DOES NOT WORK on OSX, Solaris or BSD. -# only linux, because this script relies on proc filesystem -# -# Original author: -# Frederico de Souza Araujo - fred.the.master@gmail.com -# http://www.frederico-araujo.com -# -# Usurper: -# Adam Michel - elfurbe@furbism.com -# http://www.furbism.com -# -# Originally based on: -# thin_process_memory - -# A munin plugin to monitor memory size of -# each individual thin process -# by Ben VandenBos and Avvo, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# + +=begin + +thin_threads - + A munin plugin for Linux to monitor how many threads per thin process + +For Linux ONLY ! +DOES NOT WORK on OSX, Solaris or BSD. 
+only linux, because this script relies on proc filesystem + +Original author: + Frederico de Souza Araujo - fred.the.master@gmail.com + http://www.frederico-araujo.com + +Usurper: + Adam Michel - elfurbe@furbism.com + http://www.furbism.com + +Originally based on: + thin_process_memory - + A munin plugin to monitor memory size of + each individual thin process + by Ben VandenBos and Avvo, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + #%# family=auto #%# capabilities=autoconf +=end + module Munin class ThinThreads - def run - instances = get_pids() + instances = get_pids instances.each do |instance| - pid, port = instance.split("|") - rss = (get_threads(pid).to_i) + pid, port = instance.split('|') + rss = get_threads(pid).to_i puts "thin_#{port}.value #{rss}" end end - + # only get threads count for each pid # Using Proc filesystem # ONLY LINUX! because relies on proc filesystem # TODO: make this work on OSX and Solaris, # so the whole unix gang is happy ;) def get_threads(pid) - res = `grep "Threads" /proc/#{pid}/status | cut -d ":" -f2`.gsub(/\s+/, "") - if res.match("cannot access") - return nil + res = `grep "Threads" /proc/#{pid}/status | cut -d ":" -f2`.gsub(/\s+/, '') + if res.match('cannot access') + nil else - return res + res end end - + # fetch all pids that match thin def get_pids pids = `pgrep -f 'thin' -l | awk -F " " '{ if (substr( $4, 10, 4)>=1) print $1"|"substr( $4, 10, 4)}' | sort -t'|' -nk 2`.split(/\r?\n/) end def autoconf - get_pids().length > 0 + get_pids.length > 0 end - end end mpm = Munin::ThinThreads.new case ARGV[0] -when "config" - puts "graph_title Thin Threads" - puts "graph_vlabel Threads" - puts "graph_category webserver" - puts "graph_args -l 0" - puts "graph_scale yes" - puts "graph_info Tracks how many threads per thin processes" +when 'config' + puts 'graph_title Thin Threads' + puts 'graph_vlabel Threads' + puts 'graph_category webserver' + puts 'graph_args -l 0' + puts 'graph_scale yes' + puts 'graph_info Tracks how many threads per thin processes' mpm.get_pids.each do |instance| - pid, port = instance.split("|") + pid, port = instance.split('|') puts "thin_#{port}.label thin_#{port}" puts "thin_#{port}.info Threads per Thin process" puts "thin_#{port}.type GAUGE" puts "thin_#{port}.min 0" end -when "autoconf" +when 'autoconf' if mpm.autoconf - puts "yes" + puts 'yes' exit 0 end - puts "no" - exit 1 + puts 'no' + exit 0 else mpm.run end diff --git a/plugins/thin/thins_peak_memory b/plugins/thin/thins_peak_memory index 3797cc44..ee289d8a 100755 --- a/plugins/thin/thins_peak_memory +++ b/plugins/thin/thins_peak_memory @@ -1,51 +1,54 @@ #!/usr/bin/env ruby -# thin_peak_memory - -# A munin plugin for Linux to monitor the maximum memory size -# that an each individual thin process has reached -# -# For Linux ONLY ! -# DOES NOT WORK on OSX, Solaris or BSD. 
-# only linux, because this script relies on proc filesystem -# -# Author: -# Frederico de Souza Araujo - fred.the.master@gmail.com -# http://www.frederico-araujo.com -# -# Based on: -# thin_process_memory - -# A munin plugin to monitor memory size of -# each individual thin process -# by Ben VandenBos and Avvo, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# + +=begin + +thin_peak_memory - + A munin plugin for Linux to monitor the maximum memory size + that an each individual thin process has reached + +For Linux ONLY ! +DOES NOT WORK on OSX, Solaris or BSD. +only linux, because this script relies on proc filesystem + +Author: + Frederico de Souza Araujo - fred.the.master@gmail.com + http://www.frederico-araujo.com + +Based on: + thin_process_memory - + A munin plugin to monitor memory size of + each individual thin process + by Ben VandenBos and Avvo, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 +as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + #%# family=auto #%# capabilities=autoconf +=end + module Munin class ThinPeakMemory - def run - instances = get_pids() + instances = get_pids instances.each do |instance| - pid, port = instance.split("|") - hwm = (pid_hwm(pid).to_i)/1024 + pid, port = instance.split('|') + hwm = pid_hwm(pid).to_i / 1024 puts "thin_#{port}.value #{hwm}" end end - + # only get VmHWM count for each pid # (Virtual Memory High Water Mark) # Using Proc filesystem @@ -54,49 +57,48 @@ module Munin # so the whole unix gang is happy ;) def pid_hwm(pid) res = `grep "VmHWM" /proc/#{pid}/status`.split[1] - if res.match("cannot access") - return nil + if res.match('cannot access') + nil else - return res + res end end - + # fetch all pids that match thin def get_pids pids = `pgrep -f 'thin' -l | awk -F " " '{ if (substr( $4, 10, 4)>=1) print $1"|"substr( $4, 10, 4)}' | sort -t'|' -nk 2`.split(/\r?\n/) end def autoconf - get_pids().length > 0 + get_pids.length > 0 end - end end mpm = Munin::ThinPeakMemory.new case ARGV[0] -when "config" - puts "graph_title Thin Peak Memory (High Water Mark)" - puts "graph_vlabel HWM" - puts "graph_category webserver" - puts "graph_args -l 0" - puts "graph_scale yes" - puts "graph_info Tracks the peak memory of thin processes, aka High Water Mark." 
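Each of the thin_* plugins above reads one field from /proc/<pid>/status — VmRSS for current memory, VmHWM for the peak, Threads for the thread count — for every thin process found via pgrep. The lookup itself, isolated from the Ruby and shell plumbing, is only a few lines; this sketch is Linux-only and uses PID 1 merely as a readable example:

    def proc_status_field(pid, field):
        """Return one field ('VmRSS', 'VmHWM', 'Threads', ...) from /proc/<pid>/status."""
        with open("/proc/{}/status".format(pid)) as status:
            for line in status:
                if line.startswith(field + ":"):
                    return line.split()[1]      # Vm* fields are reported in kB
        return None

    # PID 1 is just an example; the plugins use the PIDs returned by pgrep -f 'thin'.
    for field in ("VmRSS", "VmHWM", "Threads"):
        print(field, proc_status_field(1, field))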
+when 'config' + puts 'graph_title Thin Peak Memory (High Water Mark)' + puts 'graph_vlabel HWM' + puts 'graph_category webserver' + puts 'graph_args -l 0' + puts 'graph_scale yes' + puts 'graph_info Tracks the peak memory of thin processes, aka High Water Mark.' mpm.get_pids.each do |instance| - pid, port = instance.split("|") + pid, port = instance.split('|') puts "thin_#{port}.label thin_#{port}" puts "thin_#{port}.info Peak Memory" puts "thin_#{port}.type GAUGE" puts "thin_#{port}.min 0" end -when "autoconf" +when 'autoconf' if mpm.autoconf - puts "yes" + puts 'yes' exit 0 end - puts "no" - exit 1 + puts 'no' + exit 0 else mpm.run end diff --git a/plugins/tinydns/tinydns_err b/plugins/tinydns/tinydns_err index 2f57b768..20b11a45 100755 --- a/plugins/tinydns/tinydns_err +++ b/plugins/tinydns/tinydns_err @@ -25,7 +25,7 @@ if [ "$1" = "config" ]; then cat - < -Rune Nordbe Skillingstad +Rune Nordbøe Skillingstad =head1 LICENSE @@ -63,7 +63,7 @@ if(!eval "require LWP::UserAgent;") { if(!eval "require XML::Simple;") { $ret .= "XML::Simple not found"; -} +} my $URL = exists $ENV{'url'} ? $ENV{'url'} : "http://%s:%s\@127.0.0.1:%d/manager/status?XML=true"; my $PORT = exists $ENV{'ports'} ? $ENV{'ports'} : 8080; diff --git a/plugins/tor/tor-bandwidth-usage b/plugins/tor/tor-bandwidth-usage index 4c78378a..a5b7db11 100755 --- a/plugins/tor/tor-bandwidth-usage +++ b/plugins/tor/tor-bandwidth-usage @@ -3,7 +3,7 @@ # tor-bandwidth-usage - munin plugin to monitor Tor traffic # # To use this plugin you need the following: -# o Enable accounting on torrc configuration file (even if you dont want to limit bandwidth usage, +# o Enable accounting on torrc configuration file (even if you don't want to limit bandwidth usage, # just put a huge value for on AccountingMax) # example: # AccountingStart day 12:00 @@ -87,7 +87,7 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if ($failed) { say "no (failed to connect to $address port $port)"; - exit 1; + exit 0; } my $msg = Authenticate($socket); @@ -95,7 +95,7 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { say $socket "QUIT"; close($socket); say "no ($msg)"; - exit 1; + exit 0; } say $socket "QUIT"; diff --git a/plugins/tor/tor_ b/plugins/tor/tor_ index 18929d3d..7029cb9c 100755 --- a/plugins/tor/tor_ +++ b/plugins/tor/tor_ @@ -1,11 +1,10 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- +#!/usr/bin/python3 ''' =head1 NAME -tor_ +tor_ =head1 DESCRIPTION -Wildcard plugin that gathers some metrics from the Tor deamon +Wildcard plugin that gathers some metrics from the Tor daemon https://github.com/daftaupe/munin-tor Derived from https://github.com/mweinelt/munin-tor @@ -27,32 +26,35 @@ The default configuration is below [tor_*] user toranon # or any other user/group that is running tor group toranon -env.torcachefile 'munin_tor_country_stats.json' -env.torconnectmethod 'port' -env.torgeoippath "/usr/share/GeoIP/GeoIP.dat" +env.torcachefile munin_tor_country_stats.json +env.torconnectmethod port +env.torgeoippath /usr/share/GeoIP/GeoIP.dat env.tormaxcountries 15 env.torport 9051 -env.torsocket '/var/run/tor/control' +env.torsocket /var/run/tor/control To make it connect through a socket modify this way [tor_*] user toranon # or any other user/group that is running tor group toranon -env.torcachefile 'munin_tor_country_stats.json' -env.torconnectmethod 'socket' -env.torgeoippath "/usr/share/GeoIP/GeoIP.dat" +env.torcachefile munin_tor_country_stats.json +env.torconnectmethod socket +env.torgeoippath /usr/share/GeoIP/GeoIP.dat env.tormaxcountries 15 env.torport 9051 
-env.torsocket '/var/run/tor/control' +env.torsocket /var/run/tor/control =head1 COPYRIGHT MIT License =head1 AUTHOR -daftaupe +Pierre-Alain TORET + +=head1 MAGIC MARKERS + #%# family=auto + #%# capabilities=autoconf suggest ''' -from __future__ import print_function import collections import json import os @@ -70,15 +72,11 @@ except ImportError: default_torcachefile = 'munin_tor_country_stats.json' default_torconnectmethod = 'port' -default_torgeoippath = "/usr/share/GeoIP/GeoIP.dat" +default_torgeoippath = '/usr/share/GeoIP/GeoIP.dat' default_tormaxcountries = 15 default_torport = 9051 default_torsocket = '/var/run/tor/control' -#%# family=auto -#%# capabilities=autoconf suggest - - class ConnectionError(Exception): """Error connecting to the controller""" @@ -110,11 +108,14 @@ def authenticate(controller): def gen_controller(): connect_method = os.environ.get('torconnectmethod', default_torconnectmethod) if connect_method == 'port': - return stem.control.Controller.from_port(port=int(os.environ.get('torport', default_torport))) + return stem.control.Controller.from_port(port=int(os.environ.get('torport', + default_torport))) elif connect_method == 'socket': - return stem.control.Controller.from_socket_file(path=os.environ.get('torsocket', default_torsocket)) + return stem.control.Controller.from_socket_file(path=os.environ.get('torsocket', + default_torsocket)) else: - print("env.torconnectmethod contains an invalid value. Please specify either 'port' or 'socket'.", file=sys.stderr) + print("env.torconnectmethod contains an invalid value. " + "Please specify either 'port' or 'socket'.", file=sys.stderr) sys.exit(1) @@ -133,11 +134,11 @@ class TorPlugin(object): @staticmethod def conf_from_dict(graph, labels): # header - for key, val in graph.iteritems(): + for key, val in graph.items(): print('graph_{} {}'.format(key, val)) # values - for label, attributes in labels.iteritems(): - for key, val in attributes.iteritems(): + for label, attributes in labels.items(): + for key, val in attributes.items(): print('{}.{} {}'.format(label, key, val)) @staticmethod @@ -149,7 +150,7 @@ class TorPlugin(object): print('no (failed to import the required python module "stem": {})'.format(e)) try: - import GeoIP + import GeoIP # noqa: F401 except ImportError as e: print('no (failed to import the required python module "GeoIP": {})'.format(e)) @@ -167,7 +168,8 @@ class TorPlugin(object): @staticmethod def suggest(): - options = ['bandwidth', 'connections', 'countries', 'dormant', 'flags', 'routers', 'traffic'] + options = ['bandwidth', 'connections', 'countries', 'dormant', 'flags', 'routers', + 'traffic'] for option in options: print(option) @@ -251,7 +253,7 @@ class TorConnections(TorPlugin): states = dict((state, 0) for state in stem.ORStatus) for connection in connections: states[connection.rsplit(None, 1)[-1]] += 1 - for state, count in states.iteritems(): + for state, count in states.items(): print('{}.value {}'.format(state.lower(), count)) except stem.connection.AuthenticationFailure as e: print('Authentication failed ({})'.format(e)) @@ -262,8 +264,8 @@ class TorCountries(TorPlugin): # Configure plugin self.cache_dir_name = os.environ.get('torcachedir', None) if self.cache_dir_name is not None: - self.cache_dir_name = os.path.join(self.cache_dir_name, - os.environ.get('torcachefile', default_torcachefile)) + self.cache_dir_name = os.path.join( + self.cache_dir_name, os.environ.get('torcachefile', default_torcachefile)) max_countries = os.environ.get('tormaxcountries', default_tormaxcountries) 
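gen_controller() in the tor_ plugin above picks one of two stem connection methods according to env.torconnectmethod, as documented in its header. Reduced to a standalone sketch — the environment variable names and defaults are the plugin's; authentication is assumed to be cookie-based or open, otherwise a password would have to be passed to authenticate():

    import os

    import stem.control

    def open_controller():
        """Connect to Tor's control interface via TCP port or UNIX socket."""
        method = os.environ.get("torconnectmethod", "port")
        if method == "port":
            return stem.control.Controller.from_port(
                port=int(os.environ.get("torport", 9051)))
        if method == "socket":
            return stem.control.Controller.from_socket_file(
                path=os.environ.get("torsocket", "/var/run/tor/control"))
        raise ValueError("torconnectmethod must be 'port' or 'socket'")

    if __name__ == "__main__":
        with open_controller() as controller:
            controller.authenticate()   # cookie or no-auth setups
            print(controller.get_info("version"))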
self.max_countries = int(max_countries) @@ -300,7 +302,7 @@ class TorCountries(TorPlugin): try: with open(self.cache_dir_name) as f: countries_num = json.load(f) - except: + except (IOError, ValueError): # Fallback if cache_dir_name is not set, unreadable or any other # error countries_num = self.top_countries() @@ -425,9 +427,7 @@ class TorRouters(TorPlugin): 'vlabel': 'routers', 'category': 'network', 'info': 'known Tor onion routers'} - labels = {'routers': {'label': 'routers', 'min': 0, 'type': 'GAUGE'} } - - + labels = {'routers': {'label': 'routers', 'min': 0, 'type': 'GAUGE'}} TorPlugin.conf_from_dict(graph, labels) def fetch(self): @@ -437,19 +437,17 @@ class TorRouters(TorPlugin): except stem.connection.AuthenticationFailure as e: print('Authentication failed ({})'.format(e)) return - - response = controller.get_info('ns/all', None) if response is None: print("Error while reading ns/all from Tor daemon", file=sys.stderr) sys.exit(1) else: routers = response.split('\n') - onr = 0 - for router in routers: - if router[0] == "r": - onr += 1 - + onr = 0 + for router in routers: + if router[0] == "r": + onr += 1 + print('routers.value {}'.format(onr)) @@ -460,7 +458,7 @@ class TorTraffic(TorPlugin): def conf(self): graph = {'title': 'Tor traffic', 'args': '-l 0 --base 1024', - 'vlabel': 'data', + 'vlabel': 'bytes/s', 'category': 'network', 'info': 'bytes read/written'} labels = {'read': {'label': 'read', 'min': 0, 'type': 'DERIVE'}, @@ -491,7 +489,7 @@ class TorTraffic(TorPlugin): ########################## -# Main +# Main ########################## @@ -524,7 +522,8 @@ def main(): elif __file__.endswith('_traffic'): provider = TorTraffic() else: - print('Unknown plugin name, try "suggest" for a list of possible ones.', file=sys.stderr) + print('Unknown plugin name, try "suggest" for a list of possible ones.', + file=sys.stderr) sys.exit(1) if param == 'config': @@ -535,5 +534,6 @@ def main(): print('Unknown parameter "{}"'.format(param), file=sys.stderr) sys.exit(1) + if __name__ == '__main__': main() diff --git a/plugins/tor/tor_routers b/plugins/tor/tor_routers index 4709bfd4..96c83908 100755 --- a/plugins/tor/tor_routers +++ b/plugins/tor/tor_routers @@ -78,7 +78,7 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { if ($failed) { print "no (failed to connect to $address port $port)\n"; - exit 1; + exit 0; } my $msg = Authenticate($socket); @@ -86,7 +86,7 @@ if ($ARGV[0] and $ARGV[0] eq "autoconf") { print $socket "QUIT\r\n"; close($socket); print "no ($msg)\n"; - exit 1; + exit 0; } print $socket "QUIT\r\n"; diff --git a/plugins/torrent/deluge_ b/plugins/torrent/deluge_ index 3387f518..0394d63f 100755 --- a/plugins/torrent/deluge_ +++ b/plugins/torrent/deluge_ @@ -121,25 +121,25 @@ log = logging.getLogger("delugeStats") log.setLevel(logging.WARNING) conf = { - 'host': os.getenv('host', '127.0.0.1'), - 'port': int(os.getenv('port', '58846')), - 'username': os.getenv('username', ''), - 'password': os.getenv('password', '') + 'host': os.getenv('host', '127.0.0.1'), + 'port': int(os.getenv('port', '58846')), + 'username': os.getenv('username', ''), + 'password': os.getenv('password', '') } names_for_munin = { - 'numConnections': 'numConnections', - 'payloadUploadRate': 'payloadUploadRate', - 'overheadUploadRate': 'overheadUploadRate', - 'payloadDownloadRate': 'payloadDownloadRate', - 'overheadDownloadRate': 'overheadDownloadRate', - 'state.Seeding': 'seeding', - 'state.Downloading': 'downloading', - 'state.Paused': 'paused', - 'state.Error': 'error', - 'state.Queued': 'queued', - 
'state.Checking': 'checking', - 'state.Other': 'other' + 'numConnections': 'numConnections', + 'payloadUploadRate': 'payloadUploadRate', + 'overheadUploadRate': 'overheadUploadRate', + 'payloadDownloadRate': 'payloadDownloadRate', + 'overheadDownloadRate': 'overheadDownloadRate', + 'state.Seeding': 'seeding', + 'state.Downloading': 'downloading', + 'state.Paused': 'paused', + 'state.Error': 'error', + 'state.Queued': 'queued', + 'state.Checking': 'checking', + 'state.Other': 'other' } torrent_states = ["Downloading", @@ -300,11 +300,12 @@ def print_config(mode): "graph_info This graph shows the number of connections used by Deluge Torrent") print(names_for_munin["numConnections"] + ".label connections") print(names_for_munin["numConnections"] + ".min 0") - print(names_for_munin["numConnections"] + - ".info The number of connections used by Deluge Torrent") + print(names_for_munin["numConnections"] + + ".info The number of connections used by Deluge Torrent") elif mode == "bandwidth": print("graph_title Bandwidth usage") - print("graph_order payloadDownloadRate overheadDownloadRate payloadUploadRate overheadUploadRate") + print("graph_order payloadDownloadRate overheadDownloadRate payloadUploadRate " + "overheadUploadRate") print("graph_args --base 1024 -r") print("graph_vlabel bytes/s : down(-) and up(+)") print("graph_scale yes") @@ -322,7 +323,8 @@ def print_config(mode): print("overheadDownloadRate.draw STACK") print("overheadDownloadRate.min 0") print("overheadDownloadRate.graph no") - print("overheadDownloadRate.info Bandwidth 'lost' due to overhead while downloading and uploading torrents") + print("overheadDownloadRate.info Bandwidth 'lost' due to overhead while downloading and " + "uploading torrents") print("payloadUploadRate.label payload") print("payloadUploadRate.draw AREA") @@ -334,7 +336,8 @@ def print_config(mode): print("overheadUploadRate.draw STACK") print("overheadUploadRate.min 0") print("overheadUploadRate.negative overheadDownloadRate") - print("overheadUploadRate.info Bandwidth 'lost' due to overhead while downloading and uploading torrents") + print("overheadUploadRate.info Bandwidth 'lost' due to overhead while downloading and " + "uploading torrents") elif mode == "states": print("graph_title Torrents states") @@ -350,13 +353,12 @@ def print_config(mode): print("graph_period second") for state_name in torrent_states: - print(names_for_munin["state." + - state_name] + ".label " + state_name) + print(names_for_munin["state." + state_name] + ".label " + state_name) print(names_for_munin["state." + state_name] + ".draw AREASTACK") print(names_for_munin["state." + state_name] + ".type GAUGE") print(names_for_munin["state." + state_name] + ".min 0") - print(names_for_munin["state." + state_name] + - ".info Number of torrents in the '" + state_name + "' state") + print(names_for_munin["state." 
+ state_name] + + ".info Number of torrents in the '" + state_name + "' state") def fetch_info(mode): diff --git a/plugins/tplink/example-graphs/tl_sg-day.png b/plugins/tplink/example-graphs/tl_sg-day.png new file mode 100644 index 00000000..5567089c Binary files /dev/null and b/plugins/tplink/example-graphs/tl_sg-day.png differ diff --git a/plugins/tplink/tl_sg b/plugins/tplink/tl_sg new file mode 100755 index 00000000..6dfad113 --- /dev/null +++ b/plugins/tplink/tl_sg @@ -0,0 +1,239 @@ +#!/usr/bin/perl +# -*- perl -*- +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 dated June, +# 1991. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Magic markers (used by munin-node-configure and some installation scripts): +#%# family=auto +#%# capabilities=autoconf + +use strict; +use warnings; +use WWW::Mechanize; + +=pod + +=encoding UTF-8 + +=head1 NAME + +tl_sg - Plugin to monitor packets per second and link speed for TP-Link SG108E/SG1016E switches + +=head1 APPLICABLE SYSTEMS + +TP-Link SG108E/SG1016E switches with web management (http). Tested with software version 1.0.2 Build 20160526 Rel.34615 on TL SG108E + +=head1 CONFIGURATION + +Add this to the relevant munin-node config file. You can specify switch address, username, password and description for each port +(the switch management doesn't allow port descriptions). You should also create a fake host for the switch and attach the graphs to it. +Details here: https://cweiske.de/tagebuch/munin-multiple-hosts.htm + +In /etc/munin/munin.conf add a new host called tl-sg108e (or whatever you want): + [tl-sg108e] + address 127.0.0.1 + use_node_name no + +In /etc/munin/plugin-conf.d/munin-node add the following entry: + + [tl_sg] + host_name tl-sg108e + env.host 192.168.1.12 + env.port 80 + env.numberOfPorts 8 + env.username admin + env.password mySecretPassword + env.p1 'Link to PC1' + env.p2 'Link to server1' + env.p3 'Not used' + env.p4 'Link to AP' + env.p5 'Link to PC2' + env.p6 'Link to PC3' + env.p7 'Not used' + env.p8 'Uplink' + +The name in host_name must match the name defined in munin.conf, and the tl_sg name must match the plugin instance name (symlink). + +If you're monitoring multiple switches, create different symlinks in /etc/munin/plugins pointing to this plugin and use the symlink +name as a configuration section as described above. + +Requires WWW:Mechanize module: + sudo apt-get install libwww-mechanize-perl + +=head1 BUGS/GOTCHAS + +The link speed is represented as a number: + 0 - down + 5 - 100Mbps full + 6 - 1Gbps + +=head1 AUTHOR + +Adrian Popa (https://github.com/mad-ady) + +=head1 COPYRIGHT + +Copyright (c) 2018, Adrian Popa + +All rights reserved. This program is free software; you can +redistribute it and/or modify it under the terms of the GNU General +Public License as published by the Free Software Foundation; version 2 +dated June, 1991. 
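The BUGS/GOTCHAS section of tl_sg above notes that the switch reports link speed as a numeric code per port, which the plugin expands into one munin field per possible speed. Decoding such a code list directly, with the codes from the plugin's %speedMapping and sample per-port values:

    # Codes documented by tl_sg: 0 down, 1 auto, 2/3 10M half/full,
    # 4/5 100M half/full, 6 1G full-duplex.
    SPEED = {0: "down", 1: "auto", 2: "10M half-duplex", 3: "10M full-duplex",
             4: "100M half-duplex", 5: "100M full-duplex", 6: "1G full-duplex"}

    link_status = [0, 5, 0, 0, 5, 5, 5, 6]   # sample values, one code per switch port

    for port, code in enumerate(link_status, start=1):
        print("port {}: {}".format(port, SPEED.get(code, "unknown")))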
+ +=head1 VERSION + + 1.1 + +=cut + +# read parameters from munin +my $host = ( $ENV{host} || '192.168.1.1' ); +my $tcpport = ( $ENV{port} || '80' ); +my $username = ( $ENV{username} || 'admin' ); +my $password = ( $ENV{password} || 'admin' ); +my $numberOfPorts = ( $ENV{numberOfPorts} || '8' ); + +my %speedMapping = ( + 0 => "down", + 1 => "auto", + 2 => "10M half-duplex", + 3 => "10M full-duplex", + 4 => "100M half-duplex", + 5 => "100M full-duplex", + 6 => "1G full-duplex", +); + + +#populate the ports and descriptions based on ENV +my %ports = (); +for ( 1 .. $numberOfPorts ) { + my $i = $_; + if ( defined $ENV{"p$i"} ) { + $ports{$i} = $ENV{"p$i"}; + } + else { + #no description + $ports{$i} = "Port $i"; + } +} + +if ( $ARGV[0] and $ARGV[0] eq 'autoconf' ) { + print "no (manual configuration needed)\n"; + exit 0; +} + +if ( $ARGV[0] and $ARGV[0] eq "config" ) { + foreach my $graphType (qw/Packets Speed/) { + foreach my $port ( sort keys %ports ) { + print "multigraph ${graphType}_if_$port\n", + "graph_title $graphType for $host port $port $ports{$port}\n", + "graph_info $graphType graph for port $port $ports{$port}\n", + "graph_args --base 1000 -l 0\n", + "graph_scale yes\n", + "graph_category network\n"; + if ( $graphType eq 'Speed' ) { + print "graph_vlabel speed\n"; + foreach my $value (sort keys %speedMapping){ + print "p${port}_$value.label $speedMapping{$value}\n"; + print "p${port}_$value.type GAUGE\n"; + print "p${port}_$value.draw AREA\n"; + } + } + else { + print "graph_vlabel packets\n"; + foreach my $gb (qw/good bad/) { + foreach my $direction (qw/tx rx/) { + print "p${port}${direction}${gb}.label Port $port $direction ($gb)\n"; + print "p${port}${direction}${gb}.type COUNTER\n"; + } + } + } + } + } + exit 0; +} + +# extract data from the switch - uses web scraping (tested with 1.0.2 Build 20160526 Rel.34615) + +# print STDERR "Connecting to $host with user $username"; +my $mech = WWW::Mechanize->new( + autocheck => 0, + requests_redirectable => [ 'GET', 'HEAD', 'POST' ] +); +my $result = $mech->post( + "http://$host:$tcpport/logon.cgi", + [ username => $username, password => $password, logon => 'Login' ], + "Referer" => "http://$host:$tcpport/" +); +my $response = $mech->response(); + +# navigate to the page with the network stats +$result = $mech->get("http://$host:$tcpport/PortStatisticsRpm.htm"); +$response = $mech->response(); + +#print STDERR $response->code()."\n"; + +# get the data +my $data = $mech->content( raw => 1 ); + +#print STDERR "$data\n"; + +# The page stores the data in a table, but internally it is stored in 3 javascript arrays: +# state:[1,1,1,1,1,1,1,1,0,0], +# link_status:[0,5,0,0,5,5,5,6,0,0], +# pkts:[0,0,0,0,14141090,0,10461386,0,14226,0,12252,0,0,0,0,0,2872063,0,1402200,0,59764503,0,34619246,0,4913873,0,4393574,0,44170456,0,68499653,0,0,0] + +# state: 1 - Enabled, 0 - Disabled (administratively) +# link_status: 0 - down, 1 - auto, 2 - 10Mbps half, 3 - 10Mbps full, 4 - 100Mbps half, 5 - 100Mbps full, 6 - 1Gbps full +# pkts: every group of 4 values represent txGoodPkt, txBadPkt, rxGoodPkt, rxBadPkt + +# parse good/bad packets +if ( $data =~ /pkts:\[([0-9,]+)\]/ ) { + + my $packetString = $1; + my @packets = split( /,/, $packetString ); + + for ( 1 .. 
$numberOfPorts ) { + my $currentPort = $_; + my $txGoodPkt = $packets[ ( $currentPort - 1 ) * 4 ]; + my $txBadPkt = $packets[ ( $currentPort - 1 ) * 4 + 1 ]; + my $rxGoodPkt = $packets[ ( $currentPort - 1 ) * 4 + 2 ]; + my $rxBadPkt = $packets[ ( $currentPort - 1 ) * 4 + 3 ]; + print "multigraph Packets_if_$currentPort\n"; + + print "p${currentPort}txgood.value $txGoodPkt\n"; + print "p${currentPort}rxgood.value $rxGoodPkt\n"; + print "p${currentPort}txbad.value $txBadPkt\n"; + print "p${currentPort}rxbad.value $rxBadPkt\n"; + } +} + +# parse link speed +if ( $data =~ /link_status:\[([0-9,]+)\]/ ) { + + my $linkString = $1; + my @links = split( /,/, $linkString ); + for ( 1 .. $numberOfPorts ) { + my $currentPort = $_; + my $link = $links[ $currentPort - 1 ]; + print "multigraph Speed_if_$currentPort\n"; + foreach my $value (sort keys %speedMapping){ + print "p${currentPort}_$value.value ".(($value eq $link)?1:0)."\n"; + } + } + +} + +# vim: ft=perl : ts=4 : expandtab diff --git a/plugins/trafic_ro/trafic_ro_24h b/plugins/trafic_ro/trafic_ro_24h index fe49fcf2..1c5d2eb8 100755 --- a/plugins/trafic_ro/trafic_ro_24h +++ b/plugins/trafic_ro/trafic_ro_24h @@ -13,5 +13,5 @@ if [ "$1" = "config" ]; then exit 0 fi -VISITORS="$(echo 'munin' | curl curl --silent -X POST -H 'Content-type: text/xml' -d @- http://api.trafic.ro/rest/0.01/sumar-site/$RID | xmlstarlet sel -t -m "/sumar-site/vizitatori_ultimele_24_ore" -v ".")" +VISITORS="$(echo 'munin' | curl --silent -X POST -H 'Content-type: text/xml' -d @- http://api.trafic.ro/rest/0.01/sumar-site/$RID | xmlstarlet sel -t -m "/sumar-site/vizitatori_ultimele_24_ore" -v ".")" echo "24h_visitors.value" $VISITORS; diff --git a/plugins/twemproxy/nutcracker_requests_ b/plugins/twemproxy/nutcracker_requests_ index bc06d654..655ed61c 100755 --- a/plugins/twemproxy/nutcracker_requests_ +++ b/plugins/twemproxy/nutcracker_requests_ @@ -41,7 +41,7 @@ def process_data(): if(type(value) == dict): total = 0 # get server requests - for pool_key, pool_value in value.iteritems(): + for pool_key, pool_value in value.items(): if(type(pool_value) == dict): total += pool_value["requests"] print "requests_"+key+".value"+" "+str(total) @@ -52,7 +52,7 @@ def process_config(): print "graph_vlabel requests/s" data = get_stats(); - for key, value in data.iteritems(): + for key, value in data.items(): if(type(value) == dict): print "requests_"+key+".label "+key print "requests_"+key+".type COUNTER" diff --git a/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device-detail.png new file mode 100644 index 00000000..7e3d44a5 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device.png b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device.png new file mode 100644 index 00000000..4ac539f7 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_device.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type-detail.png new file mode 100644 index 00000000..a89e32fe Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type.png b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type.png new file mode 100644 index 00000000..8d74aaf0 Binary files 
/dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-clients_by_type.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-device_cpu.png b/plugins/ubiquiti/example-graphs/unifi_api-device_cpu.png new file mode 100644 index 00000000..0e5e7ef2 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-device_cpu.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-device_load.png b/plugins/ubiquiti/example-graphs/unifi_api-device_load.png new file mode 100644 index 00000000..ebecac8c Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-device_load.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-device_mem.png b/plugins/ubiquiti/example-graphs/unifi_api-device_mem.png new file mode 100644 index 00000000..c6fc0073 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-device_mem.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-device_uptime.png b/plugins/ubiquiti/example-graphs/unifi_api-device_uptime.png new file mode 100644 index 00000000..92e22c9a Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-device_uptime.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device-detail.png new file mode 100644 index 00000000..2ffb1594 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device.png new file mode 100644 index 00000000..c5b1cd4d Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_device.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network-detail.png new file mode 100644 index 00000000..33f77e6e Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network.png new file mode 100644 index 00000000..af71a0de Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_network.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port-detail.png new file mode 100644 index 00000000..282e5d0a Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port.png new file mode 100644 index 00000000..570f625c Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_port.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio-detail.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio-detail.png new file mode 100644 index 00000000..14bcd0e4 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio-detail.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio.png b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio.png new file mode 100644 index 00000000..f60ae9b9 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_radio.png differ diff --git a/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_uplink.png 
b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_uplink.png new file mode 100644 index 00000000..1e7c5b02 Binary files /dev/null and b/plugins/ubiquiti/example-graphs/unifi_api-xfer_by_uplink.png differ diff --git a/plugins/ubiquiti/unifi_api b/plugins/ubiquiti/unifi_api new file mode 100755 index 00000000..003a8fc4 --- /dev/null +++ b/plugins/ubiquiti/unifi_api @@ -0,0 +1,1212 @@ +#!/usr/bin/perl +# -*- perl -*- + +=encoding utf8 + +=head1 NAME + +unifi_api - Munin plugin to display device and network information from the + Ubiquiti unifi API + +=head1 APPLICABLE SYSTEMS + +Unifi controllors with direct API access + +Controller version 5+ required (tested with 5.8.x) + +WebRTC is not supported at this time + +=head1 CONFIGURATION + +This script uses the multigraph functionality to generate many graphs. As such, there +are a significant amount of available configuration options + +=head2 API Details + +You will need to supply your API login details: + + [unifi_api] + # User name to login to unifi controller API. Default is "ubnt". Ideally, this should + # point to a read-only account. + env.user Controller_Username + + # Password to login to unifi controller API. Default is "ubnt" + env.pass Controller_Password + + # URL of the API, with port if needed. No trailing slash. + # Default is https://localhost:8443 + env.api_url https://unifi.fqdn.com:8443 + + # Verify SSL certificate name against host. + # Note: if using a default cloudkey certificate, this will fail unless you manually add it + # to the local keystore. + # Default is "yes" + env.ssl_verify_host no + + # Verify Peer's SSL vertiicate. + # Note: if using a default cloudkey certificate, this will fail + # Default is "yes" + env.ssl_verify_peer no + + # The human readable name of the unifi site - used for graph titles + env.name Site Name + + # "Site" string - the internal unifi API site identifier. + # default is "default" - found when you connect to the web interface + # it's the term in the URL - /manage/site/site_string/dashboard + env.site site_string + + +=head2 Graph Categories / Host Management + +Sometimes, you need more control over where the unifi graphs appear. + + env.force_category 0 + # By default, Use standard munin well know categories - + # system: cpu, mem, load, & uptime + # network: clients, transfer statistics. + # + +To use this feature, set "force_category" to a text string (i.e. "unifi"). + +This is very helpful if your graphs are going to appear inside another host - for instance +if your munin graphs for that host are monitoring the host the controller is running on, and +the unifi API instance. + +Sometimes however, you want to monitor either an offsite API, or a cloudkey which, at least by +default, does not run munin-node. In that case, you can actually create a "virtual" munin host to +display only these graphs (or any combination you like). This is documented in the main munin docs, +but in a nutshell: + +In your munin-node plugin configuration: (Something like: /etc/munin/plugin-conf.d/munin-node) + + [unifi_api] + host_name hostname.whatever.youlike + env.force_category unifi + +And, in your munin master configuration: (Something like: /etc/munin/munin.conf) + + [hostname.whatever.youlike] + address ip.of.munin.node + +Make sure you do *not* set "use_node_name" on this new host. 
It may be necessary to define "host_name" +in your munin-node configuration as well, if you have not already (Likely, on a multi-homed host, this +has been done to keep munin-node from advertising itself as localhost) + +More information: + + * L + + +=head2 Toggling of graphs / Individual options + +You can turn off individual graphs. A few graphs have extra configuration +options. + +By default, everything is enabled. Set to "no" to disable + + [unifi_api] + # Show device CPU utilization + env.enable_device_cpu yes + + # Show device memory usage + env.enable_device_mem yes + + # Show device load average (switches and APs only) + env.enable_device_load yes + + # Show device uptime + env.enable_device_uptime yes + + # Show number of clients connected to each device + env.enable_clients_device yes + # Show detailed graphs for each device (per device graphs) + env.enable_detail_clients_device yes + + # Show number of clients connected to each network type + env.enable_clients_type yes + # Show detailed graphs for each client type (per type graphs) + env.enable_detail_clients_type yes + # Show unauthorized / authorized client list + # if you are not using the guest portal, this is useless + env.show_authorized_clients_type yes + + # Show transfer statistics on switch ports + env.enable_xfer_port yes + # Show detailed graphs per switch port + env.enable_detail_xfer_port yes + # Hide ports that have no link (When set to no, unplugged ports will transfer 0, not be undefined) + env.hide_empty_xfer_port yes + + # Show transfer statistics per device + env.enable_xfer_device yes + # Show detailed graphs for each device + env.enable_detail_xfer_device yes + + # Show transfer statistics per named network + env.enable_xfer_network yes + # Show detailed graphs for each named network + env.enable_detail_xfer_network yes + + # Show transfer statistics per radio + env.enable_xfer_radio yes + # Show detailed graphs for each radio + env.enable_detail_xfer_radio yes + + +=head1 CAPABILITIES + +This plugin supports L + +=head1 DEPENDENCIES + +This plugin requires munin-multiugraph. + +=over + +=item WWW::Curl::Easy + +Perl extension interface for libcurl + +=item JSON + + JSON (JavaScript Object Notation) encoder/decoder + +=back + +=head1 PERFORMANCE CONCERNS + +The main performance concern on this is the huge number of graphs that may be +generated. Using the cron version of munin-graph may hurt a lot. + +A bit of a case study: + + | My Site | UBNT Demo +--------------------------------------- +Devices | 8 | 126 +AP's | 4 | 118 +24xSwitch | 1 | 5 +8xSwitch | 2 | 2 +Output Bytes | 64,262 | 431,434 +Output Lines | 1,761 | 14,586 +Output Graphs | 77 | 530 + +So, just note that the growth in the amount of graphed date can be extreme. + + +=head1 LICENSE + +Copyright (C) 2018 J.T.Sage (jtsage@gmail.com) + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see L. 
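For readers who have not used the two modules above, here is a minimal, standalone sketch of the login request roughly as C<fetch_data()> performs it further down in this plugin: WWW::Curl::Easy issues the HTTPS POST and JSON decodes the reply. The URL and credentials are the plugin defaults and act as placeholders here; cookie handling, the SSL-verification options and the follow-up endpoint requests are omitted.

    use strict;
    use warnings;
    use WWW::Curl::Easy;
    use JSON;

    my $curl = WWW::Curl::Easy->new();
    my $json = JSON->new();
    my $body;    # curl writes the raw response body into this scalar

    $curl->setopt( $curl->CURLOPT_POST, 1 );
    $curl->setopt( $curl->CURLOPT_URL, 'https://localhost:8443/api/login' );
    $curl->setopt( $curl->CURLOPT_POSTFIELDS,
        '{"username":"ubnt", "password":"ubnt"}' );
    $curl->setopt( $curl->CURLOPT_WRITEDATA, \$body );

    my $retcode = $curl->perform;
    die 'Unable to connect to API: ' . $curl->strerror($retcode) . "\n"
        if $retcode != 0;

    # On success the controller reports meta.rc eq "ok" (this is what the plugin checks).
    my $reply = $json->allow_nonref->utf8->relaxed->decode($body);
    print 'API login result: ', $reply->{'meta'}->{'rc'}, "\n";

The real C<fetch_data()> keeps the session cookie on the same curl handle and then reuses it for the C<device>, C<sta> and C<wlan> endpoints, checking C<< $reply->{'meta'}->{'rc'} >> after each call.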
+ +=head1 MAGIC MARKERS + + #%# family=manual + #%# capabilities= + +=cut + +use warnings; +use strict; +use utf8; +use Munin::Plugin; + +# Check dependencies +my @errorCode; +my $me = (split '/', $0)[-1]; + +if (! eval {require JSON; JSON->import(); 1; } ) { + push @errorCode, "JSON module not found"; +} +if (! eval {require WWW::Curl::Easy; 1;} ) { + push @errorCode, "WWW::Curl::Easy module not found"; +} + +# Fail on not found dependencies +if ( @errorCode != 0 ) { + die "FATAL:$me: Perl dependencies not installed (", join(", " => @errorCode), ")\n"; +} + +# Multigraph cabability is required for this plugin +need_multigraph(); + +# Somewhat (in)sane defaults for host, pass, etc +my %APIconfig = ( + 'user' => env_default_text('user' , 'ubnt'), + 'pass' => env_default_text('pass' , 'ubnt'), + 'api_url' => env_default_text('api_url' , 'https://localhost:8443'), + 'site' => env_default_text('site' , 'default'), + 'ssl_verify_host' => env_default_text('ssl_verify_host', 'yes'), + 'ssl_verify_peer' => env_default_text('ssl_verify_peer', 'yes'), + 'name' => env_default_text('name' , 'Unnamed Site'), +); + +# The big table of plugin options - see POD documentation for what these do. +my %PluginConfig = ( + 'enable_device_cpu' => env_default_bool_true('enable_device_cpu'), + 'enable_device_mem' => env_default_bool_true('enable_device_mem'), + 'enable_device_load' => env_default_bool_true('enable_device_load'), + 'enable_device_uptime' => env_default_bool_true('enable_device_uptime'), + 'enable_clients_device' => env_default_bool_true('enable_clients_device'), + 'enable_clients_type' => env_default_bool_true('enable_clients_network'), + 'enable_xfer_port' => env_default_bool_true('enable_xfer_port'), + 'enable_xfer_device' => env_default_bool_true('enable_xfer_device'), + 'enable_xfer_network' => env_default_bool_true('enable_xfer_network'), + 'enable_xfer_radio' => env_default_bool_true('enable_xfer_radio'), + 'enable_detail_xfer_port' => env_default_bool_true('enable_detail_xfer_port'), + 'enable_detail_xfer_device' => env_default_bool_true('enable_detail_xfer_device'), + 'enable_detail_xfer_network' => env_default_bool_true('enable_detail_xfer_network'), + 'enable_detail_xfer_radio' => env_default_bool_true('enable_detail_xfer_radio'), + 'enable_detail_clients_device' => env_default_bool_true('enable_detail_clients_device'), + 'enable_detail_clients_type' => env_default_bool_true('enable_detail_clients_network'), + 'hide_empty_xfer_port' => env_default_bool_true('hide_empty_xfer_port'), + 'show_authorized_clients_type' => env_default_bool_true('show_authorized_clients_type'), + 'force_category' => env_default_text('force_category', 0), +); + +# Set up needed API endpoints +my %APIPoint = ( + 'login' => $APIconfig{"api_url"} . "/api/login", + 'device' => $APIconfig{"api_url"} . "/api/s/" . $APIconfig{"site"} . "/stat/device", + 'wlan' => $APIconfig{"api_url"} . "/api/s/" . $APIconfig{"site"} . "/rest/wlanconf", + 'sta' => $APIconfig{"api_url"} . "/api/s/" . $APIconfig{"site"} . "/stat/sta", +); + +my %APIResponse; +my %APIJsonResponse; +my %Data; +my $retcode; + +# Init curl and JSON +my $curl = WWW::Curl::Easy->new() or die "FATAL:$me: WWW::Curl::Easy init failed!\n"; +my $jsonOBJ = JSON->new() or die "FATAL:$me: JSON init failed!\n"; + + +## Fetch the data from the API + +# The rest is a way to use local files from the local disk. Undocumented and not really supported. + +if ( !env_default_bool_true('USE_API') ) { + if (! 
eval {require File::Slurp; File::Slurp->import(); 1; } ) { + die "Local debug unavailable, File::Slurp CPAN module required\n"; + } + foreach ( "./demo-test-files/device", "./demo-test-files/sta", "./demo-test-files/wlanconf" ) { + if ( ! -f $_ ) { die "File not found: $_\n"; } + } + $APIJsonResponse{'device'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode(read_file('./demo-test-files/device')); + $APIJsonResponse{'sta'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode(read_file('./demo-test-files/sta')); + $APIJsonResponse{'wlan'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode(read_file('./demo-test-files/wlanconf')); +} else { + fetch_data(); +} + + +## Process the data + +make_data(); + +if ( defined($ARGV[0]) && $ARGV[0] eq "config" ) { + # Do the config step for each set of graphs + do_config_mem(); + do_config_cpu(); + do_config_load(); + do_config_uptime(); + do_config_xfer_by_device(); + do_config_xfer_by_uplink(); + do_config_xfer_by_port(); + do_config_xfer_by_network(); + do_config_xfer_by_radio(); + do_config_clients_by_device(); + do_config_clients_by_type(); + + # If dirtyconfig is not supported, or turned off, exit here. Otherwise, continue to fetch section + if ( !defined($ENV{'MUNIN_CAP_DIRTYCONFIG'}) || !$ENV{'MUNIN_CAP_DIRTYCONFIG'} ) { exit 0; } +} + +# Do the fetch step for each set of graphs +do_values_cpu(); +do_values_mem(); +do_values_load(); +do_values_uptime(); +do_values_xfer_by_device(); +do_values_xfer_by_uplink(); +do_values_xfer_by_port(); +do_values_xfer_by_network(); +do_values_xfer_by_radio(); +do_values_clients_by_device(); +do_values_clients_by_type(); + + + + + + + +####################### +# SUBROUTINES CONFIG # +####################### + +sub do_config_clients_by_type { + # Provide client count by type - CONFIG + if ( !$PluginConfig{'enable_clients_type'} ) { return 0; } + + graph_prologue( + 'unifi_clients_per_network', + 'Clients Connected / Network', + '-l 0 --base 1000', + 'clients', + 'network', + 'Clients connected per type - manually summing these numbers may be meaningful, as clients are often of multiple types' + ); + + foreach ( @{$Data{'typesOrder'}} ) { + print $_ , ".label " , $Data{'types'}{$_}[0] , "\n"; + } + + if ( ! $PluginConfig{'enable_detail_clients_type'} ) { return 1; } + + foreach ( @{$Data{'typesOrder'}} ) { + if ( $Data{'types'}{$_}[1] == 1 ) { + graph_prologue( + 'unifi_clients_per_network.' . $_, + 'Clients Connected : ' . $Data{'types'}{$_}[0], + '-l 0 --base 1000', + 'clients', + 'network', + 'Clients connected via that are of type: ' . $Data{'types'}{$_}[0] + ); + print "users.label Users\n"; + print "guests.label Guests\n"; + } + } + return 1; +} + +sub do_config_clients_by_device { + # Provide client count by device - CONFIG + if ( !$PluginConfig{'enable_clients_device'} ) { return 0; } + + graph_prologue( + 'unifi_clients_per_device', + 'Clients Connected / Device', + '-l 0 --base 1000', + 'clients', + 'network', + 'Clients connected to each unifi device' + ); + + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".label " , $Data{'device'}{$_}->{'label'} , "\n"; + } + + if ( ! $PluginConfig{'enable_detail_clients_device'} ) { return 1; } + + foreach ( sort keys %{$Data{'device'}} ) { + graph_prologue( + 'unifi_clients_per_device.' . $_, + 'Clients / Device : ' . $Data{'device'}{$_}->{'label'}, + '-l 0 --base 1000', + 'clients', + 'network', + 'Clients connected to the ' . $Data{'device'}{$_}->{'label'} . 
" unifi device" + ); + print "users.label Users\n"; + print "guests.label Guests\n"; + } + return 1; +} + +sub do_config_xfer_by_radio { + # Provide transfer for radios - CONFIG + if ( !$PluginConfig{'enable_xfer_radio'} ) { return 0; } + + graph_prologue( + 'unifi_xfer_per_radio', + 'Transfer / radio', + '--base 1000', + 'Packets/${graph_period}', + 'network', + 'Number of packets transferred per individual radio band' + ); + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "uap" ) { next; } + + foreach ( @{$Data{'device'}{$thisDevice}{'radio'}} ) { + print $thisDevice , "_" , $_->{"name"} , "_pack.label " , $_->{"label"} , "\n"; + print $thisDevice , "_" , $_->{"name"} , "_pack.type DERIVE\n"; + print $thisDevice , "_" , $_->{"name"} , "_pack.min 0\n"; + } + } + + if ( ! $PluginConfig{'enable_detail_xfer_radio'} ) { return 1; } + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "uap" ) { next; } + + graph_prologue( + 'unifi_xfer_per_radio.' . $thisDevice, + 'Transfer / radio : ' . $Data{'device'}{$thisDevice}->{'name'}, + '--base 1000', + 'Packets/${graph_period}', + 'network', + 'Transfered Packets, Dropped / Retried Packets, and Error Packets for the WLAN device: ' . $Data{'device'}{$thisDevice}->{'name'} + ); + + foreach ( @{$Data{'device'}{$thisDevice}{'radio'}} ) { + print $_->{"name"} , "_pkt.label " , $_->{"type"} , " Packets\n"; + print $_->{"name"} , "_pkt.type DERIVE\n"; + print $_->{"name"} , "_pkt.min 0\n"; + print $_->{"name"} , "_dret.label " , $_->{"type"} , " Dropped / Retries\n"; + print $_->{"name"} , "_dret.type DERIVE\n"; + print $_->{"name"} , "_dret.min 0\n"; + print $_->{"name"} , "_err.label " , $_->{"type"} , " Errors\n"; + print $_->{"name"} , "_err.type DERIVE\n"; + print $_->{"name"} , "_err.min 0\n"; + } + } + return 1; +} + +sub do_config_xfer_by_network { + # Provide transfer for named networks - CONFIG + if ( !$PluginConfig{'enable_xfer_network'} ) { return 0; } + + graph_prologue( + 'unifi_xfer_per_network', + 'Transfer / named network', + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received per each named network' + ); + + foreach my $thisNet ( sort keys %{$Data{'networks'}} ) { + foreach ( "_rxbytes", "_txbytes" ) { + print $thisNet , $_ , ".label " , $Data{'networks'}{$thisNet}->{"label"} . "\n"; + print $thisNet , $_ , ".type DERIVE\n"; + print $thisNet , $_ , ".min 0\n"; + } + print $thisNet , "_rxbytes.graph no\n"; + print $thisNet , "_txbytes.negative " , $thisNet , "_rxbytes\n"; + } + + if ( ! $PluginConfig{'enable_detail_xfer_network'} ) { return 1; } + + foreach my $thisNet ( sort keys %{$Data{'networks'}} ) { + graph_prologue( + 'unifi_xfer_per_network.' . $thisNet, + 'Transfer / named network : ' . $Data{'networks'}{$thisNet}->{'label'}, + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received for the network named: ' . 
$Data{'networks'}{$thisNet}->{'label'} + ); + foreach ( "rxbyte", "txbyte" ) { + print $_ , ".label Bytes\n"; + print $_ , ".type DERIVE\n"; + print $_ , ".min 0\n"; + } + print "rxbyte.graph no\n"; + print "txbyte.negative rxbyte\n"; + } + return 1; +} + +sub do_config_xfer_by_port { + # Provide transfer for switch ports - CONFIG + if ( !$PluginConfig{'enable_xfer_port'} ) { return 0; } + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "usw" ) { next; } + graph_prologue( + 'unifi_xfer_per_port_' . $thisDevice, + 'Transfer / port : ' . $Data{'device'}{$thisDevice}->{'label'}, + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received per port on the switch named: ' . $Data{'device'}{$thisDevice}->{'label'} + ); + foreach my $thisPort ( @{$Data{'device'}{$thisDevice}{'ports'}} ) { + foreach ( "_rxbytes", "_txbytes" ) { + print $thisDevice , "_" , $thisPort->{"name"} , $_ , ".label " , $thisPort->{"label"} . "\n"; + print $thisDevice , "_" , $thisPort->{"name"} , $_ , ".type DERIVE\n"; + print $thisDevice , "_" , $thisPort->{"name"} , $_ , ".min 0\n"; + } + print $thisDevice , "_" , $thisPort->{"name"} , "_rxbytes.graph no\n"; + print $thisDevice , "_" , $thisPort->{"name"} , "_txbytes.negative " , $thisDevice , "_" , $thisPort->{"name"} , "_rxbytes\n"; + } + } + + if ( ! $PluginConfig{'enable_detail_xfer_port'} ) { return 1; } + + # Extended graphs + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "usw" ) { next; } + foreach my $thisPort ( @{$Data{'device'}{$thisDevice}{'ports'}} ) { + graph_prologue( + 'unifi_xfer_per_port_' . $thisDevice . "." . $thisPort->{'name'}, + 'Transfer / port : ' . $Data{'device'}{$thisDevice}->{'label'} . " : " . $thisPort->{'label'}, + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received on port "' . $thisPort->{'label'} . '" of the switch "' . $Data{'device'}{$thisDevice}->{'label'} . '"' + ); + foreach ( "rxbyte", "txbyte" ) { + print $_ . ".label Bytes\n"; + print $_ . ".type DERIVE\n"; + print $_ . ".min 0\n"; + } + print "rxbyte.graph no\n"; + print "txbyte.negative rxbyte\n"; + } + } + return 1; +} + +sub do_config_xfer_by_uplink { + # Provide transfer for unifi uplink - CONFIG + if ( !$PluginConfig{'enable_xfer_device'} ) { return 0; } + + graph_prologue( + 'unifi_xfer_by_uplink', + 'Transfer on uplink : ' . 
$Data{'uplink'}{'devName'}, + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received on the WAN port of the USG, and the speedtest result of the same port' + ); + + foreach ( "rx", "tx" ) { + print $_ , "_speed.label Speedtest\n"; + print $_ , "_bytes.label Transferred\n"; + print $_ , "_speed.type GAUGE\n"; + print $_ , "_bytes.type DERIVE\n"; + print $_ , "_speed.min 0\n"; + print $_ , "_bytes.min 0\n"; + } + + print "rx_speed.graph no\n"; + print "rx_bytes.graph no\n"; + print "tx_speed.negative rx_speed\n"; + print "tx_bytes.negative rx_bytes\n"; + + return 1; +} + +sub do_config_xfer_by_device { + # Provide transfer for each unifi device - CONFIG + if ( !$PluginConfig{'enable_xfer_device'} ) { return 0; } + + graph_prologue( + 'unifi_xfer_per_device', + 'Transfer / device', + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received per unifi device' + ); + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + foreach ( "_rxbytes", "_txbytes" ) { + print $thisDevice , $_ , ".label " , $Data{'device'}{$thisDevice}->{'label'} , "\n"; + print $thisDevice , $_ , ".type DERIVE\n"; + print $thisDevice , $_ , ".min 0\n"; + } + print $thisDevice , "_rxbytes.graph no\n"; + print $thisDevice , "_txbytes.negative " , $thisDevice , "_rxbytes\n"; + } + + if ( $PluginConfig{'enable_detail_xfer_device'} ) { + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + graph_prologue( + 'unifi_xfer_per_device.' . $thisDevice, + 'Transfer / device : ' . $Data{'device'}{$thisDevice}->{'label'}, + '--base 1000', + 'Bytes/${graph_period} rcvd (-) / trans (+)', + 'network', + 'Bytes sent and received on the unifi device named: ' . $Data{'device'}{$thisDevice}->{'label'} + ); + foreach ( "rxbyte", "txbyte" ) { + print $_ , ".label Bytes\n"; + print $_ , ".type DERIVE\n"; + print $_ , ".min 0\n"; + } + print "rxbyte.graph no\n"; + print "txbyte.negative rxbyte\n"; + } + } + return 1; +} + +sub do_config_uptime { + # Provide device uptime for each unifi device - CONFIG + if ( !$PluginConfig{'enable_device_uptime'} ) { return 0; } + graph_prologue( + 'unifi_device_uptime', + 'Uptime', + '--base 1000 -r --lower-limit 0', + 'days', + 'system', + 'Uptime in days for each unifi device' + ); + + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".label " , $Data{'device'}{$_}->{"name"} , "\n"; + } + return 1; +} + +sub do_config_cpu { + # Provide device CPU usage for each unifi device - CONFIG + if ( !$PluginConfig{'enable_device_cpu'} ) { return 0; } + graph_prologue( + 'unifi_device_cpu', + 'CPU Usage', + '--base 1000 -r --lower-limit 0 --upper-limit 100', + '%', + 'system', + 'CPU usage as a percentage for each unifi device' + ); + + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".label " , $Data{'device'}{$_}->{"name"} , "\n"; + } + return 1; +} + +sub do_config_load { + # Provide device load average for each unifi device - CONFIG + if ( !$PluginConfig{'enable_device_load'} ) { return 0; } + graph_prologue( + 'unifi_device_load', + 'Load Average', + '-l 0 --base 1000', + 'load', + 'system', + 'Load average for each unifi Access Point or Switch' + ); + + foreach ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$_}->{'type'} eq 'ugw' ) { next; } + print $_ , ".label " , $Data{'device'}{$_}->{"name"} , "\n"; + } + return 1; +} + +sub do_config_mem { + # Provide device memory usage for each unifi device - CONFIG + if ( !$PluginConfig{'enable_device_mem'} ) { return 0; } + graph_prologue( + 
'unifi_device_mem', + 'Memory Usage', + '--base 1000 -r --lower-limit 0 --upper-limit 100', + '%', + 'system', + 'Memory usage as a percentage for each unifi device' + ); + + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".label " , $Data{'device'}{$_}->{"name"} , "\n"; + } + return 1; +} + + + + + + + + + + +######################### +# SUBROUTINES VALUES # +######################### + +sub do_values_clients_by_type { + # Provide client count by type - VALUES + if ( !$PluginConfig{'enable_clients_type'} ) { return 0; } + + print "multigraph unifi_clients_per_network\n"; + + foreach ( @{$Data{'typesOrder'}} ) { + print $_ , ".value " , ( $Data{'types'}{$_}[2] + $Data{'types'}{$_}[3] ) , "\n"; + } + + if ( ! $PluginConfig{'enable_detail_clients_type'} ) { return 1; } + + foreach ( @{$Data{'typesOrder'}} ) { + if ( $Data{'types'}{$_}[1] == 1 ) { + print "multigraph unifi_clients_per_network.$_\n"; + print "users.value " , $Data{'types'}{$_}[2] , "\n"; + print "guests.value " , $Data{'types'}{$_}[3] , "\n"; + } + } + return 1; +} + +sub do_values_clients_by_device { + # Provide client count by device - VALUES + if ( !$PluginConfig{'enable_clients_device'} ) { return 0; } + + print "multigraph unifi_clients_per_device\n"; + + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".value " , $Data{'device'}{$_}->{'clients'} , "\n"; + } + + if ( ! $PluginConfig{'enable_detail_clients_device'} ) { return 1; } + + foreach ( sort keys %{$Data{'device'}} ) { + print "multigraph unifi_clients_per_device.$_\n"; + print "users.value " , $Data{'device'}{$_}->{'users'} , "\n"; + print "guests.value " , $Data{'device'}{$_}->{'guests'} , "\n"; + } + return 1; +} + +sub do_values_xfer_by_radio { + # Provide transfer for radios - VALUES + if ( !$PluginConfig{'enable_xfer_radio'} ) { return 0; } + + print "multigraph unifi_xfer_per_radio\n"; + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "uap" ) { next; } + + foreach ( @{$Data{'device'}{$thisDevice}{'radio'}} ) { + print $thisDevice , "_" , $_->{"name"} , "_pack.value " , $_->{"pckt"} , "\n";; + } + } + + if ( ! $PluginConfig{'enable_detail_xfer_radio'} ) { return 1; } + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "uap" ) { next; } + + print "multigraph unifi_xfer_per_radio.$thisDevice\n"; + + foreach ( @{$Data{'device'}{$thisDevice}{'radio'}} ) { + print $_->{"name"} , "_pkt.value " , $_->{"pckt"} , "\n"; + print $_->{"name"} , "_dret.value " , $_->{"dret"} , "\n"; + print $_->{"name"} , "_err.value " , $_->{"err"} , "\n"; + } + } + return 1; +} + +sub do_values_xfer_by_network { + # Provide transfer for named networks - CONFIG + if ( !$PluginConfig{'enable_xfer_network'} ) { return 0; } + + print "multigraph unifi_xfer_per_network\n"; + + foreach my $thisNet ( sort keys %{$Data{'networks'}} ) { + print $thisNet , "_rxbytes.value " , $Data{'networks'}{$thisNet}->{"rx"} , "\n"; + print $thisNet , "_txbytes.value " , $Data{'networks'}{$thisNet}->{"tx"} , "\n"; + } + + if ( ! 
$PluginConfig{'enable_detail_xfer_network'} ) { return 1; } + + foreach my $thisNet ( sort keys %{$Data{'networks'}} ) { + print "multigraph unifi_xfer_per_network.$thisNet\n"; + print "rxbyte.value " , $Data{'networks'}{$thisNet}->{"rx"} , "\n"; + print "txbyte.value " , $Data{'networks'}{$thisNet}->{"tx"} , "\n"; + } + return 1; +} + +sub do_values_xfer_by_port { + # Provide transfer for switch ports - VALUES + if ( !$PluginConfig{'enable_xfer_port'} ) { return 0; } + + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "usw" ) { next; } + print "multigraph unifi_xfer_per_port_$thisDevice\n"; + + foreach ( @{$Data{'device'}{$thisDevice}{'ports'}} ) { + print $thisDevice , "_" , $_->{"name"} , "_rxbytes.value " , $_->{"rx"} , "\n"; + print $thisDevice , "_" , $_->{"name"} , "_txbytes.value " , $_->{"tx"} , "\n"; + } + } + + if ( ! $PluginConfig{'enable_detail_xfer_port'} ) { return 1; } + + # Extended graphs + foreach my $thisDevice ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$thisDevice}->{'type'} ne "usw" ) { next; } + foreach ( @{$Data{'device'}{$thisDevice}{'ports'}} ) { + print 'multigraph unifi_xfer_per_port_' . $thisDevice . "." . $_->{'name'} . "\n"; + print "rxbyte.value " , $_->{"rx"} , "\n"; + print "txbyte.value " , $_->{"tx"} , "\n"; + } + } + return 1; +} + +sub do_values_xfer_by_uplink { + # Provide transfer for unifi uplink - CONFIG + if ( !$PluginConfig{'enable_xfer_device'} ) { return 0; } + + print "multigraph unifi_xfer_by_uplink\n"; + print "rx_speed.value " . $Data{'uplink'}{"rx_speed"} . "\n"; + print "tx_speed.value " . $Data{'uplink'}{"tx_speed"} . "\n"; + print "rx_bytes.value " . $Data{'uplink'}{"rx_bytes"} . "\n"; + print "tx_bytes.value " . $Data{'uplink'}{"tx_bytes"} . "\n"; + return 1; +} + +sub do_values_xfer_by_device { + # Provide transfer for each unifi device - CONFIG + if ( !$PluginConfig{'enable_xfer_device'} ) { return 0; } + + print "multigraph unifi_xfer_per_device\n"; + foreach ( sort keys %{$Data{'device'}} ) { + print $_ . "_rxbytes.value " . $Data{'device'}{$_}->{"rx"} , "\n"; + print $_ . "_txbytes.value " . $Data{'device'}{$_}->{"tx"} , "\n"; + } + if ( $PluginConfig{'enable_detail_xfer_device'} ) { + foreach ( sort keys %{$Data{'device'}} ) { + print "multigraph unifi_xfer_per_device." 
, $_ , "\n"; + print "rxbyte.value " , $Data{'device'}{$_}->{"rx"} , "\n"; + print "txbyte.value " , $Data{'device'}{$_}->{"tx"} , "\n"; + } + } + return 1; +} + +sub do_values_cpu { + # Provide device CPU usage for each unifi device - VALUES + if ( !$PluginConfig{'enable_device_cpu'} ) { return 0; } + + print "multigraph unifi_device_cpu\n"; + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".value " , ( $Data{'device'}{$_}->{"cpu"} ) , "\n"; + } + return 1; +} + +sub do_values_mem { + # Provide device memory usage for each unifi device - VALUES + if ( !$PluginConfig{'enable_device_mem'} ) { return 0; } + + print "multigraph unifi_device_mem\n"; + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".value " , ( $Data{'device'}{$_}->{"mem"} ) , "\n"; + } + return 1; +} + +sub do_values_load { + # Provide device load average for each unifi device - VALUES + if ( !$PluginConfig{'enable_device_load'} ) { return 0; } + + print "multigraph unifi_device_load\n"; + foreach ( sort keys %{$Data{'device'}} ) { + if ( $Data{'device'}{$_}->{'type'} eq 'ugw' ) { next; } + print $_ , ".value " , ( $Data{'device'}{$_}->{"load"} ) , "\n"; + } + return 1; +} + +sub do_values_uptime { + # Provide device uptime for each unifi device - VALUES + if ( !$PluginConfig{'enable_device_uptime'} ) { return 0; } + + print "multigraph unifi_device_uptime\n"; + foreach ( sort keys %{$Data{'device'}} ) { + print $_ , ".value " , ( $Data{'device'}{$_}->{"uptime"} / 86400 ) , "\n"; + } + return 1; +} + + + + + + + +######################### +# SUBROUTINES GENERAL # +######################### + +sub graph_prologue { + # Generate graph prologues - slightly less copy-pasta, and less chance for things to go wrong + my ( $id, $title, $args, $vlabel, $category, $info ) = (@_); + + print "multigraph $id\n"; + print 'graph_title ' , $title , ' : ' , $APIconfig{"name"} , "\n"; + print "graph_args $args\n"; + print "graph_vlabel $vlabel\n"; + if ( $PluginConfig{'force_category'} ) { + print "graph_category ", $PluginConfig{'force_category'}, "\n"; + } else { + print "graph_category $category\n"; + } + if ( $info ) { + print 'graph_info For the unifi site named "' , $APIconfig{"name"} , "\", $info\n"; + } + return 1; +} + +# Collate all collected data into something we can use. +sub make_data { + foreach my $thisDevice ( @{$APIJsonResponse{'device'}->{'data'}} ) { + # Grab everything we care to know about each device. + $Data{'device'}{ make_safe($thisDevice->{'name'}, $thisDevice->{'serial'}) } = { + 'label' => $thisDevice->{'name'}, + 'users' => ($thisDevice->{'user-num_sta'} || 0), + 'guests' => ($thisDevice->{'guest-num_sta'} || 0), + 'clients' => ($thisDevice->{'user-num_sta'} + $thisDevice->{'guest-num_sta'} || 0), + 'tx' => $thisDevice->{'rx_bytes'}, + 'rx' => $thisDevice->{'tx_bytes'}, + 'name' => $thisDevice->{'name'}, + 'uptime' => $thisDevice->{'uptime'}, + 'cpu' => $thisDevice->{'system-stats'}->{'cpu'}, + 'mem' => $thisDevice->{'system-stats'}->{'mem'}, + 'load' => ( $thisDevice->{'type'} eq 'ugw' ? 
'U' : $thisDevice->{'sys_stats'}->{'loadavg_1'} ), + 'type' => $thisDevice->{'type'} + }; + + if ( $thisDevice->{'type'} eq 'ugw' ) { # Handle firewall specially, record uplink and networks + foreach my $thisNet ( @{$thisDevice->{'network_table'}} ) { + $Data{'networks'}{ make_safe($thisNet->{'name'}, $thisNet->{'_id'} ) } = { + 'label' => $thisNet->{'name'}, + 'tx' => $thisNet->{'tx_bytes'}, + 'rx' => $thisNet->{'rx_bytes'} + } + } + + $Data{'uplink'}{'devName'} = $thisDevice->{'name'}; + $Data{'uplink'}{'rx_speed'} = $thisDevice->{'speedtest-status'}->{'xput_download'} * 1000000; + $Data{'uplink'}{'tx_speed'} = $thisDevice->{'speedtest-status'}->{'xput_upload'} * 1000000; + + foreach ( @{$thisDevice->{"port_table"}} ) { + if ( $_->{name} eq "wan" ) { + $Data{'uplink'}{'rx_bytes'} = $_->{'rx_bytes'}; + $Data{'uplink'}{'tx_bytes'} = $_->{'tx_bytes'}; + } + } + } + + if ( $thisDevice->{'type'} eq 'usw' ) { # Handle swiches specially - record port stats + my @port_list; + + foreach my $port ( @{$thisDevice->{'port_table'}} ) { + if ( !$PluginConfig{'hide_empty_xfer_port'} || $port->{'up'} ) { + push @port_list , { + 'name' => 'port_' . zPad($port->{'port_idx'}), + 'label' => zPad($port->{'port_idx'}) . '-' . $port->{'name'}, + 'rx' => $port->{'rx_bytes'}, + 'tx' => $port->{'tx_bytes'} + }; + } + } + $Data{'device'}{ make_safe($thisDevice->{'name'}, $thisDevice->{'serial'}) }{'ports'} = \@port_list; + } + + if ( $thisDevice->{'type'} eq 'uap' ) { # Handle APS specially - record radio stats + my @theseRadios; + + foreach my $thisRadio ( @{$thisDevice->{'radio_table_stats'}} ) { + my $name = make_safe( $thisRadio->{'name'}, "" ); + my $label = ( $thisRadio->{'channel'} < 12 ) ? '2.4Ghz' : '5Ghz'; + + $_ = $thisDevice->{'stat'}; + + push @theseRadios, { + 'name' => $name, + 'label' => $label . '-' . $thisDevice->{'name'}, + 'pckt' => $_->{$name . '-rx_packets'} + $_->{$name . '-tx_packets'}, + 'dret' => $_->{$name . '-rx_dropped'} + $_->{$name . '-tx_retries'} + $_->{$name . '-tx_dropped'}, + 'err' => ($_->{$name . '-rx_errors'} || 0) + ($_->{$name . '-tx_errors'} || 0), + 'type' => $label + }; + } + $Data{'device'}{ make_safe($thisDevice->{'name'}, $thisDevice->{'serial'}) }{'radio'} = \@theseRadios; + } + } # END PROCESSING OF DEVICE DATA + + + # PROCESS NETWORK TYPE DATA + + # -> UNLESS, type graph is disabled. + # + # WHY: if the client list is large (huge. 10,000+), this is CPU intensive + if ( !$PluginConfig{'enable_clients_type'} ) { return 1; } + + $Data{'types'} = { + "wired" => ["Wired Connection", 1, 0, 0], + "wifi" => ["Wireless Connection", 1, 0, 0], + "tuser" => ["Total Users", 0, 0, 0], + "tguest" => ["Total Guests", 0, 0, 0], + "authed" => ["Authorized Guests", 0, 0, 0], + "unauth" => ["Unauthorized Guests", 0, 0, 0], + }; + + $Data{'typesOrder'} = ( $PluginConfig{'show_authorized_clients_type'} ) ? 
+ [ "wired", "wifi", "tuser", "tguest", "authed", "unauth"] : + [ "wired", "wifi", "tuser", "tguest" ]; + + + my @wlans; + + foreach my $thisNet ( @{$APIJsonResponse{'wlan'}->{'data'}} ) { + $Data{'types'}{ make_safe($thisNet->{'name'}, "") } = [ $thisNet->{'name'}, 1, 0, 0 ]; + push @wlans, make_safe($thisNet->{'name'}, ""); + } + + foreach ( sort @wlans ) { + push @{$Data{'typesOrder'}}, $_; + } + + foreach my $client ( @{$APIJsonResponse{'sta'}->{'data'}} ) { + if ( $client->{"is_wired"} ) { + if ( $client->{"is_guest"} ) { + $Data{'types'}->{'wired'}[3]++; + $Data{'types'}->{'guest'}[3]++; + } else { + $Data{'types'}->{'wired'}[2]++; + $Data{'types'}->{'user'}[2]++; + } + } else { + if ( $client->{"is_guest"} ) { + $Data{'types'}->{make_safe($client->{"essid"}, "")}[3]++; + $Data{'types'}->{'wifi'}[3]++; + $Data{'types'}->{'guest'}[3]++; + if ( $client->{"authorized"} ) { + $Data{'types'}->{'authed'}[3]++; + } else { + $Data{'types'}->{'unauth'}[3]++; + } + } else { + $Data{'types'}->{make_safe($client->{"essid"}, "")}[2]++; + $Data{'types'}->{'wifi'}[2]++; + $Data{'types'}->{'user'}[2]++; + } + } + } + + return 1; +} + + +sub fetch_data { + # Set up curl, and login to API + $curl->setopt($curl->CURLOPT_POST,1); + $curl->setopt($curl->CURLOPT_COOKIEFILE,""); # Session only cookie + $curl->setopt($curl->CURLOPT_SSL_VERIFYPEER, (( $APIconfig{"ssl_verify_peer"} =~ m/no/i ) ? 0 : 1) ); + $curl->setopt($curl->CURLOPT_SSL_VERIFYHOST, (( $APIconfig{"ssl_verify_host"} =~ m/no/i ) ? 0 : 2) ); + $curl->setopt($curl->CURL_SSLVERSION_TLSv1, 1); + $curl->setopt($curl->CURLOPT_URL, $APIPoint{'login'}); + $curl->setopt($curl->CURLOPT_POSTFIELDS, q[{"username":"] . $APIconfig{"user"} . q[", "password":"] . $APIconfig{"pass"} . q["}] ); + $curl->setopt($curl->CURLOPT_WRITEDATA, \$APIResponse{'login'}); + $retcode = $curl->perform; + + if ( $retcode != 0 ) { + die "FATAL:$me: Unable to connect to API: " . $curl->strerror($retcode) . " " . $curl->errbuf . "\n"; + } + + $APIJsonResponse{'login'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode($APIResponse{'login'}); + + if ( $APIJsonResponse{'login'}->{'meta'}->{'rc'} ne 'ok' ) { + die "FATAL:$me: Unable to login to API - it said: " , $APIJsonResponse{'login'}->{'meta'}->{'msg'} , "\n"; + } + + # Change method to GET + $curl->setopt($curl->CURLOPT_HTTPGET,1); + + + # Get some API data. + + # Device data + $curl->setopt($curl->CURLOPT_WRITEDATA, \$APIResponse{'device'}); + $curl->setopt($curl->CURLOPT_URL, $APIPoint{'device'}); + $retcode = $curl->perform; + + if ( $retcode != 0 ) { + die "FATAL:$me: Unable to connect to API: " . $curl->strerror($retcode) . " " . $curl->errbuf . "\n"; + } + + $APIJsonResponse{'device'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode($APIResponse{'device'}); + + if ( $APIJsonResponse{'device'}->{'meta'}->{'rc'} ne 'ok' ) { + die "FATAL:$me: Unable get device data from API - it said: " , $APIJsonResponse{'device'}->{'meta'}->{'msg'} , "\n"; + } + + # STA (client) data + $curl->setopt($curl->CURLOPT_WRITEDATA, \$APIResponse{'sta'}); + $curl->setopt($curl->CURLOPT_URL, $APIPoint{'sta'}); + $retcode = $curl->perform; + + if ( $retcode != 0 ) { + die "FATAL:$me: Unable to connect to API: " . $curl->strerror($retcode) . " " . $curl->errbuf . 
"\n"; + } + + $APIJsonResponse{'sta'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode($APIResponse{'sta'}); + + if ( $APIJsonResponse{'sta'}->{'meta'}->{'rc'} ne 'ok' ) { + die "FATAL:$me: Unable get sta data from API - it said: " , $APIJsonResponse{'sta'}->{'meta'}->{'msg'} , "\n"; + } + + # WLAN data + $curl->setopt($curl->CURLOPT_WRITEDATA, \$APIResponse{'wlan'}); + $curl->setopt($curl->CURLOPT_URL, $APIPoint{'wlan'}); + $retcode = $curl->perform; + + if ( $retcode != 0 ) { + die "FATAL:$me: Unable to connect to API: " . $curl->strerror($retcode) . " " . $curl->errbuf . "\n"; + } + + $APIJsonResponse{'wlan'} = $jsonOBJ->allow_nonref->utf8->relaxed->decode($APIResponse{'wlan'}); + + if ( $APIJsonResponse{'wlan'}->{'meta'}->{'rc'} ne 'ok' ) { + die "FATAL:$me: Unable get wlan data from API - it said: " , $APIJsonResponse{'wlan'}->{'meta'}->{'msg'} , "\n"; + } +} + +# Make field names safe, and lowercase. +# +# Typically, $extraName should be the MAC address of the unique ID identifier as the unifi +# controller software does not enforce that device names or network names are unique. +sub make_safe { + my ( $name, $extraName ) = ( @_ ); + if ( $extraName ne "" ) { + return clean_fieldname(lc($name) . "_" . $extraName); + } else { + return lc(clean_fieldname($name)); + } +} + +# Get a default from an environmental variable - return text +# +# env_default(, ) +sub env_default_text { + my ( $env_var, $default ) = (@_); + return ( ( defined $ENV{$env_var} ) ? $ENV{$env_var} : $default ), +} + +# Get a default from an environmental variable - boolean true +# +# env_default_bool_true (, ) +sub env_default_bool_true { + my $env_var = $_[0]; + return ( ( defined $ENV{$env_var} && $ENV{$env_var} =~ m/no/i ) ? 0 : 1 ); +} + +# Quick 2 digit zero pad +sub zPad { return sprintf("%02d", $_[0]); } diff --git a/plugins/ubuntu/apt_ubuntu b/plugins/ubuntu/apt_ubuntu index a54e1008..cf30c0c5 100755 --- a/plugins/ubuntu/apt_ubuntu +++ b/plugins/ubuntu/apt_ubuntu @@ -21,12 +21,15 @@ # Magic markers - optional - used by installation scripts and # munin-config: # -#%# capabilities=autoconf -#%# family=contrib +# #%# capabilities=autoconf +# #%# family=contrib + +import os +import sys +import warnings ########################################################### -category = 'security' # 'upgrades' -title = 'Upgradable packages' # 'Upgradeable packages' +title = 'Upgradable packages' # 'Upgradeable packages' vlabel = 'Total packages' other = 'other' total = 'total' @@ -38,12 +41,9 @@ origins = ['Ubuntu'] critical = 1 ########################################################### -import os -import sys -import warnings - warnings.filterwarnings('ignore', 'apt API not stable yet', FutureWarning) + def autoconf(): if os.path.exists('/etc/lsb-release'): for line in open('/etc/lsb-release'): @@ -51,60 +51,60 @@ def autoconf(): try: import apt except ImportError: - print 'no (python-apt not installed)' - sys.exit(1) + print('no (python-apt not installed)') + sys.exit(0) cache = apt.Cache() - if not cache.has_key('update-notifier-common'): - print 'no (update-notifier-common not found)' - sys.exit(1) + if 'update-notifier-common' not in cache: + print('no (update-notifier-common not found)') + sys.exit(0) if not cache['update-notifier-common'].isInstalled: - print 'no (update-notifier-common not installed)' - sys.exit(1) + print('no (update-notifier-common not installed)') + sys.exit(0) if not os.path.exists('/etc/apt/apt.conf.d/10periodic'): - print 'no (/etc/apt/apt.conf.d/10periodic not found)' + print('no 
(/etc/apt/apt.conf.d/10periodic not found)') sys.exit(1) for line in open('/etc/apt/apt.conf.d/10periodic'): if line.strip() == 'APT::Periodic::Update-Package-Lists "1";': - print 'yes' + print('yes') sys.exit(0) - print 'no (APT::Periodic::Update-Package-Lists not "1")' - sys.exit(1) - print 'no' - sys.exit(1) - -def config(): - print 'graph_category security' - print 'graph_title %s' % (title) - #print 'graph_total %s' % (total) - print 'graph_vlabel %s' % (vlabel) - for i, archive in enumerate(archives + [other]): - if len(colour) > i: - print '%s.colour %s' % (archive, colour[i]) - if i < critical: - print '%s.critical 0:0' % (archive) - if i == 0: - print '%s.draw AREA' % (archive) - else: - print '%s.draw STACK' % (archive) - print '%s.label %s' % (archive, archive) - if i + 1 > critical: - print '%s.warning 0:0' % (archive) - print 'total.colour 000000' - print 'total.draw LINE1' - print 'total.label %s' % (total) + print('no (APT::Periodic::Update-Package-Lists not "1")') + sys.exit(0) + print('no (missing /etc/lsb-release file)') sys.exit(0) + +def config(): + print('graph_category security') + print('graph_title %s' % (title)) + print('graph_vlabel %s' % (vlabel)) + for i, archive in enumerate(archives + [other]): + if len(colour) > i: + print('%s.colour %s' % (archive, colour[i])) + if i < critical: + print('%s.critical 0:0' % (archive)) + if i == 0: + print('%s.draw AREA' % (archive)) + else: + print('%s.draw STACK' % (archive)) + print('%s.label %s' % (archive, archive)) + if i + 1 > critical: + print('%s.warning 0:0' % (archive)) + print('total.colour 000000') + print('total.draw LINE1') + print('total.label %s' % (total)) + sys.exit(0) + + def check_origin(pkg): - #print 'Checking: %s (%s)' % (pkg.name, map(str, pkg.candidateOrigin)) if pkg.candidate.origins: for archive in archives: for origin in pkg.candidate.origins: - #a = origin.archive.rpartition('-')[2] a = origin.archive.split('-')[origin.archive.count('-')] - if a == archive and origin.origin in origins: + if (a == archive) and (origin.origin in origins): return a return other + if len(sys.argv) > 1: if sys.argv[1] == 'autoconf': autoconf() @@ -116,19 +116,22 @@ if len(sys.argv) > 1: try: import apt + import apt_pkg except ImportError: - print "The module 'apt' is currently not installed. You can install it by typing:\nsudo apt-get install python-apt\nImportError: No module named apt" + print("The module 'apt' is currently not installed. You can install it by typing:\n" + "sudo apt-get install python-apt\nImportError: No module named apt") sys.exit(1) + pkgs = {} total = 0 for pkg in apt.Cache(): - if pkg.is_upgradable: + if (pkg.is_upgradable) and (pkg._pkg.selected_state != apt_pkg.SELSTATE_HOLD): a = check_origin(pkg) pkgs[a] = pkgs.get(a, 0) + 1 total += 1 for archive in archives + [other]: - print '%s.value %s' % (archive, pkgs.pop(archive, 0)) + print('%s.value %s' % (archive, pkgs.pop(archive, 0))) -print 'total.value %s' % (total) +print('total.value %s' % (total)) diff --git a/plugins/ultramonkey/ultramonkey-l7 b/plugins/ultramonkey/ultramonkey-l7 index 22878f9e..8778bf0f 100755 --- a/plugins/ultramonkey/ultramonkey-l7 +++ b/plugins/ultramonkey/ultramonkey-l7 @@ -24,10 +24,9 @@ sub get_autoconf `$L7VSADM`; if ( $? 
) { print qq{no ($L7VSADM something wrong ...)\n}; - exit 1; + } else { + print qq{yes\n}; } - - print qq{yes\n}; } diff --git a/plugins/unicorn/unicorn_ b/plugins/unicorn/unicorn_ old mode 100644 new mode 100755 diff --git a/plugins/unicorn/unicorn_memory_status b/plugins/unicorn/unicorn_memory_status index a11aa2bf..01f08953 100755 --- a/plugins/unicorn/unicorn_memory_status +++ b/plugins/unicorn/unicorn_memory_status @@ -8,10 +8,10 @@ # # set path to your rails app -RAILS_ROOT = "/path/to/rails/app" +RAILS_ROOT = '/path/to/rails/app'.freeze # set name to your unicorn.pid -PID_NAME = "unicorn.pid" +PID_NAME = 'unicorn.pid'.freeze module Munin class UnicornMemoryStatus @@ -30,9 +30,11 @@ module Munin ps_output = `ps w --ppid #{master_pid}` ps_output.split("\n").each do |line| chunks = line.strip.split(/\s+/, 5) - pid, pcmd = chunks[0], chunks[4] - next if pid !~ /\A\d+\z/ or pcmd !~ /worker/ - result << pid.to_i + pid = chunks[0] + pcmd = chunks[4] + next if pid !~ /\A\d+\z/ || pcmd !~ /worker/ + + result << pid.to_i end result end @@ -41,18 +43,18 @@ module Munin result = 0 memory = memory_usage result += memory[:master][master_pid] - memory[:worker].each do |pid, worker_memory| + memory[:worker].each do |_pid, worker_memory| result += worker_memory end result end def memory_usage - result = { :master => {master_pid => nil}, :worker => {} } + result = { master: { master_pid => nil }, worker: {} } ps_output = `ps auxw | grep unicorn` ps_output.split("\n").each do |line| chunks = line.strip.split(/\s+/, 11) - pid, pmem_rss, _ = chunks.values_at(1, 5, 10) + pid, pmem_rss, = chunks.values_at(1, 5, 10) pmem = pmem_rss.to_i * 1024 pid = pid.to_i @@ -68,15 +70,15 @@ module Munin end case ARGV[0] -when "autoconf" - puts "yes" -when "config" +when 'autoconf' + puts 'yes' +when 'config' puts "graph_title Unicorn [#{File.basename(__FILE__).gsub(/^unicorn_memory_status_/, '')}] - Memory usage" - puts "graph_args --base 1024 -l 0" - puts "graph_vlabel bytes" - puts "graph_category webserver" - puts "total_memory.label total_memory" - puts "total_memory.draw LINE2" + puts 'graph_args --base 1024 -l 0' + puts 'graph_vlabel bytes' + puts 'graph_category webserver' + puts 'total_memory.label total_memory' + puts 'total_memory.draw LINE2' else m = Munin::UnicornMemoryStatus.new(ENV['rails_root'] || RAILS_ROOT, ENV['pid_name'] || PID_NAME) puts "total_memory.value #{m.total_memory}" diff --git a/plugins/unicorn/unicorn_status b/plugins/unicorn/unicorn_status index 3422f992..547116d3 100755 --- a/plugins/unicorn/unicorn_status +++ b/plugins/unicorn/unicorn_status @@ -8,10 +8,10 @@ # # set path to your rails app -RAILS_ROOT = "/path/to/rails/app" +RAILS_ROOT = '/path/to/rails/app'.freeze # set name to your unicorn.pid -PID_NAME = "unicorn.pid" +PID_NAME = 'unicorn.pid'.freeze module Munin class UnicornStatus @@ -30,9 +30,11 @@ module Munin ps_output = `ps w --ppid #{master_pid}` ps_output.split("\n").each do |line| chunks = line.strip.split(/\s+/, 5) - pid, pcmd = chunks[0], chunks[4] - next if pid !~ /\A\d+\z/ or pcmd !~ /worker/ - result << pid.to_i + pid = chunks[0] + pcmd = chunks[4] + next if pid !~ /\A\d+\z/ || pcmd !~ /worker/ + + result << pid.to_i end result end @@ -66,15 +68,15 @@ module Munin end case ARGV[0] -when "autoconf" - puts "yes" -when "config" +when 'autoconf' + puts 'yes' +when 'config' puts "graph_title Unicorn [#{File.basename(__FILE__).gsub(/^unicorn_status_/, '')}] - Status" - puts "graph_args -l 0" - puts "graph_vlabel number of workers" - puts "graph_category webserver" - puts 
"total_worker.label total_workers" - puts "idle_worker.label idle_workers" + puts 'graph_args -l 0' + puts 'graph_vlabel number of workers' + puts 'graph_category webserver' + puts 'total_worker.label total_workers' + puts 'idle_worker.label idle_workers' else m = Munin::UnicornStatus.new(ENV['rails_root'] || RAILS_ROOT, ENV['pid_name'] || PID_NAME) puts "total_worker.value #{m.worker_count}" diff --git a/plugins/user/cpubyuser b/plugins/user/cpubyuser index eb3edd70..2b383b82 100755 --- a/plugins/user/cpubyuser +++ b/plugins/user/cpubyuser @@ -2,15 +2,26 @@ # # Plugin to monitor CPU usage, for a selected set of users # -# Usage: Place in /etc/munin/node.d/ (or link it there using ln -s) +# Usage: Place in /etc/munin/plugins/ (or link it there using ln -s) # Add this to your /etc/munin/plugin-conf.d/munin-node: # [cpubyuser] +# user root # required if /proc can't be read from by any user! # env.USERS root yann +# env.OTHER_FIELD others # enable 'others'-list, set the label/field name # -# If env.USERS is set to ALL, count all logged in users. +# root and yann being a list of the users to monitor. # -# root and yann being a list of the users to monitor. -# You need to also make sure that awk is installed +# If env.USERS is set to ALL, count all logged in users, and if set to +# ALLPROC, *all* users with a running process will be listed, except for root. +# +# You need to also make sure that awk is installed +# +# 2019-08-30 v 1.4 pcy : +# - add USERS=ALLPROC, not relying on a tty or pty being present for user detection +# - OTHERS_FIELD now defaults to disabled, explicitly give it a +# value to re-enable it (eg. 'others') +# - use ps(1) instead of top(1) for easier and more robust +# parsing/summary calculation # # 2008-12-08 v 1.3.1 Hanisch Elián: # - support for dots in user names. @@ -39,9 +50,12 @@ . "$MUNIN_LIBDIR/plugins/plugin.sh" -OTHER_FIELD="others" -[ "$USERS" = "ALL" ] && USERS=$(w --no-header | awk '{ print $1 }' | sort | uniq) - +[ -z "$USERS" ] && USERS=ALL +if [ "$USERS" = "ALLPROC" ]; then + USERS="$(ps ax --format uname | tail +2 | sort -u | grep -v -e '^root$')" +elif [ "$USERS" = "ALL" ]; then + USERS="$(w --no-header | cut -d' ' -f 1 | sort -u)" +fi if [ "$1" = "autoconf" ]; then if [ -n "$USERS" ]; then @@ -61,44 +75,42 @@ if [ "$1" = "config" ]; then echo "graph_scale no" echo "graph_period second" user_fields="$(for user in $USERS; do clean_fieldname "$user" | tr '\n' ' '; done)" - echo "graph_order $user_fields $OTHER_FIELD" + echo "graph_order $user_fields $(clean_fieldname "$OTHER_FIELD")" for user in $USERS "$OTHER_FIELD"; do - user_field="$(clean_fieldname "$user")" - echo "${user_field}.label $user" - echo "${user_field}.info CPU used by user $user" - echo "${user_field}.type GAUGE" - echo "${user_field}.draw AREASTACK" + if [ -n "$user" ]; then + user_field="$(clean_fieldname "$user")" + echo "${user_field}.label $user" + echo "${user_field}.info CPU used by user $user" + echo "${user_field}.type GAUGE" + echo "${user_field}.draw AREASTACK" + fi done exit fi -top -b -n 1 | sed '1,/^ *PID /d' | \ +OTHER_PRINT="" +[ -z "$OTHER_FIELD" ] || OTHER_PRINT="print \"$(clean_fieldname "$OTHER_FIELD")\", others_sum;" + +ps ax --format "%cpu user" | tail +2 | \ awk -v USERS="$USERS" ' # Store the CPU usage of each process - the mapping to the # user happens later. We cannot use the second column # (username) directly, since it may be abbreviated (ending # with "+"). 
- { CPU_PER_PID[$1]=$9 } + { CPU_USER[$2]=$1 } END { - split(USERS, user_array) - for (user_index in user_array) { - user = user_array[user_index] - # retrieve all process IDs belonging to the user - "ps -u "user" -o pid --no-headers 2>/dev/null | tr \"\n\" \" \"" | getline pids - user_cpu = 0 - split(pids, pid_array) - # summarize the cpu usage of this usage - for (pid_index in pid_array) { - pid = pid_array[pid_index] - user_cpu += CPU_PER_PID[pid] - delete CPU_PER_PID[pid] - } - print user, user_cpu - } - # add all remaining cpu usages into "others" others_sum = 0 - for (other_usage in CPU_PER_PID) others_sum+=CPU_PER_PID[other_usage] - print "'"$OTHER_FIELD"'", others_sum; + split(USERS, user_array) + for (user in CPU_USER) { + m = match(USERS,user) + if (m != 0) { + _user=user + gsub(/[-.]/,"_",_user); + print _user, (CPU_USER[user]) + } else + others_sum += CPU_USER[user] + } + '"$OTHER_PRINT"' }' | while read -r user count; do # apply fieldname cleanup echo "$(clean_fieldname "$user").value $count" diff --git a/plugins/user/cronjobs b/plugins/user/cronjobs new file mode 100755 index 00000000..12859d8f --- /dev/null +++ b/plugins/user/cronjobs @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# -*- python -*- +""" +=head1 NAME + +Plugin to monitor the number of cronjobs running per user, gathering data from +syslog. + +=head1 INSTALLATION + +Usage: Place in /etc/munin/plugins/ (or link it there using ln -s) + +=head1 CONFIGURATION + +Add this to your /etc/munin/plugin-conf.d/munin-node: + +=over 2 + + [cronjobs] + user root + env.syslog_path /var/log/syslog # default value + env.cron_ident_regex crond? # for finding cron entries in the syslog, + case-insensitive (default value) + +=back + +The plugin needs to run as root in order to read from syslog. + +=head1 HISTORY + +2019-09-09: v 1.0 pcy : created + +=head1 USAGE + +Parameters understood: + + config (required) + autoconf (optional - used by munin-config) + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf +""" + + +from datetime import datetime, timedelta, timezone +import os +import re +import shutil +import socket +import sys +import struct +import time + + +syslog_path = os.getenv('syslog_path', '/var/log/syslog') +cron_ident_regex = os.getenv('cron_ident_regex', 'crond?') + +# expected format: +# CRON[]: () CMD ... 
+# example: +# Aug 16 22:00:01 zap CRON[23060]: (root) CMD (/usr/local/bin/dnsconfig -s) +cron_syslog_regex = re.compile('^.* %s %s\[[0-9]*\]: \((.*?)\)' # noqa: W605 + % (socket.gethostname(), cron_ident_regex), + re.I) + +STATEFILE = os.getenv('MUNIN_STATEFILE') + + +def loadstate(): + if not os.path.isfile(STATEFILE): + return None + + with open(STATEFILE, 'rb') as f: + tstamp = struct.unpack('d', f.read())[0] + return datetime.fromtimestamp(tstamp, tz=timezone.utc) + + +def savestate(state): + with open(STATEFILE, 'wb') as f: + f.write(struct.pack('d', state.timestamp())) + + +def extrcronuser(line: str): + t = cron_syslog_regex.search(line) + if t is None: + return None + gr = t.groups() + return gr[0] if len(gr) == 1 else None + + +def getcronlines(): + with open(syslog_path, 'r') as f: + for x in f.readlines(): + user = extrcronuser(x) + if user is not None: + yield (x, user) + + +def getcronusers(lines): + return set(x[1] for x in lines) + + +def autoconf(): + if shutil.which('crontab') is None: + print("no (need cron installed)") + elif not os.access(syslog_path, os.R_OK): + print("no (cannot access syslog file '%s')" % syslog_path) + else: + print("yes") + + +def config(): + usernames = getcronusers(getcronlines()) + print("""\ +graph_title Cron jobs per user +graph_vlabel jobs +graph_category processes""") + for n in usernames: + print(n + ".label " + n) + print(n + ".info jobs of user " + n) + + +def fetch(): + STATE = loadstate() + # why is there no stdlib function for this?! + localtz = timezone(timedelta(seconds=time.localtime().tm_gmtoff)) + now = datetime.now() + now_withtz = datetime.now(tz=localtz) + yearsfx = ' ' + str(now.year) + pyearsfx = ' ' + str(now.year - 1) + cronlines = list(getcronlines()) + allnames = getcronusers(cronlines) + counts = {} + + hostname = socket.gethostname() + for ln, name in cronlines: + datestr = ln[:ln.index(hostname)].strip() + logdate = datetime.strptime(datestr + yearsfx, "%b %d %H:%M:%S %Y") + if logdate > now: + logdate = datetime.strptime(datestr + pyearsfx, "%b %d %H:%M:%S %Y") + # add timezone info (ugly hack), as strptime doesn't want to do this + logdate = now_withtz + (logdate - now) + if STATE is None or logdate > STATE: + counts[name] = (counts[name] + 1) if name in counts else 1 + + for n in allnames: + if n in counts: + print("%s.value %d" % (n, counts[n])) + else: + print("%s.value 0" % n) + + savestate(now_withtz) + + +if len(sys.argv) >= 2: + if sys.argv[1] == 'autoconf': + autoconf() + elif sys.argv[1] == 'config': + config() + else: + fetch() +else: + fetch() diff --git a/plugins/user/membyuser b/plugins/user/membyuser index 25b8b3fb..ec050ac0 100755 --- a/plugins/user/membyuser +++ b/plugins/user/membyuser @@ -1,11 +1,19 @@ #!/bin/bash # -# Plugin to monitor Memory usage inspired by cpubyuser -# -# 2012-05-23 Sebastien Campion +# Plugin to monitor Memory usage inspired by cpubyuser +# +# Usage: Place in /etc/munin/plugins/ (or link it there using ln -s) +# Add this to your /etc/munin/plugin-conf.d/munin-node: +# [membyuser] +# user root # required if /proc can't be read from by any user! 
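# Illustrative only, not part of this plugin: the same per-user RSS aggregation
# done below can be reproduced interactively to sanity-check a single user's
# value ("alice" is a placeholder username):
#   ps -e -o rss= -o user= | awk '$2 == "alice" { sum += $1 } END { print sum * 1024, "bytes" }'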
-LU=`ps auh | cut -d' ' -f 1 | sort -u` -USERS=`echo $LU` +## 2012-05-23 Sebastien Campion +# changed on 2019-08-30 by pcy : +# - change category from 'memory' to 'system' (so it appears next to cpubyuser) +# - use rss instead of vsz +# - more robust username enumeration + +USERS="$(ps ax --format uname | tail +2 | sort -u | grep -v -e '^root$')" if [ "$1" = "autoconf" ]; then if [ -n "$USERS" ]; then @@ -17,12 +25,11 @@ if [ "$1" = "autoconf" ]; then fi if [ "$1" = "config" ]; then - echo "graph_args --base 1000 -r --lower-limit 0" + echo "graph_args --base 1024" echo "graph_title Memory usage, by user" echo "graph_category memory" echo "graph_info This graph shows memory usage, for monitored users." - echo "graph_vlabel KB" - echo "graph_scale no" + echo "graph_vlabel Bytes" echo "graph_period second" _USERS=${USERS//[-.]/_} echo "graph_order $_USERS others" @@ -32,17 +39,12 @@ if [ "$1" = "config" ]; then echo "${_USER}.label $USER" echo "${_USER}.info Memory used by user $USER" echo "${_USER}.type GAUGE" - if [ $FIRSTUSER -eq 1 ]; then - echo "${_USER}.draw AREA" - FIRSTUSER=0 - else - echo "${_USER}.draw STACK" - fi + echo "${_USER}.draw AREASTACK" done exit fi -ps -e -o "%z%U" | \ +ps -e -o rss,user | \ awk -v USERS="$USERS" ' { if ($2 != "USER") MEM_USER[$2]+=$1 } END { @@ -52,9 +54,9 @@ ps -e -o "%z%U" | \ if (m != 0) { _user=user gsub(/[-.]/,"_", _user); - print _user".value", MEM_USER[user] + print _user".value", (MEM_USER[user] * 1024) } else - others_sum += MEM_USER[user] + others_sum += (MEM_USER[user] * 1024) } print "others.value", others_sum; }' diff --git a/plugins/user/multipsu b/plugins/user/multipsu index ab196bfa..ab29db3a 100755 --- a/plugins/user/multipsu +++ b/plugins/user/multipsu @@ -11,8 +11,8 @@ # Configuration example # # [multipsu] -# env.multipsunames root exim ftp -# +# env.multipsunames root exim ftp +# # # Magic markers (optional): #%# family=manual @@ -25,7 +25,7 @@ if [ "$1" = "autoconf" ]; then echo yes fi exit 0 -fi +fi if [ "$1" = "config" ]; then diff --git a/plugins/user/procbyuser b/plugins/user/procbyuser new file mode 100755 index 00000000..41f87be6 --- /dev/null +++ b/plugins/user/procbyuser @@ -0,0 +1,100 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 INTRODUCTION + +Plugin to monitor the amount of processes owned by a user + +=head1 INSTALLATION + +Usage: Place in /etc/munin/plugins/ (or link it there using ln -s) + +=head1 CONFIGURATION + +Add this to your /etc/munin/plugin-conf.d/munin-node: + +=over 2 + + [procbyuser] + user root # required if /proc can't be read from by any user! + env.USERS root yann # defaults to ALL, i.e. display all users. 'root' is counted as one of the 'others' + env.OTHER_FIELD others # enable 'others'-list, set the label/field name + +=back + +=head1 HISTORY + +2019-09-06 v 1.0 pcy : created (based on cpubyuser) + +=head1 USAGE + +Parameters understood: + + config (required) + autoconf (optional - used by munin-config) + +=head1 MAGIC MARKERS + +#%# family=auto +#%# capabilities=autoconf + +=cut + +. "$MUNIN_LIBDIR/plugins/plugin.sh" + +[ -z "$USERS" ] && USERS=ALL +if [ "$USERS" = "ALL" ]; then + USERS="$(ps ax --format uname | tail +2 | sort -u | grep -v -e '^root$')" +fi + +if [ "$1" = "autoconf" ]; then + echo "yes" +fi + +if [ "$1" = "config" ]; then + echo "graph_args -r --lower-limit 0" + echo "graph_title Processes per user" + echo "graph_category processes" + echo "graph_info This graph shows the amount of processes owned by a user." 
+ echo "graph_vlabel processes" + echo "graph_scale no" + echo "graph_period second" + user_fields="$(for user in $USERS; do clean_fieldname "$user" | tr '\n' ' '; done)" + echo "graph_order $user_fields $(clean_fieldname "$OTHER_FIELD")" + for user in $USERS "$OTHER_FIELD"; do + if [ -n "$user" ]; then + user_field="$(clean_fieldname "$user")" + echo "${user_field}.label $user" + echo "${user_field}.info processes of user $user" + echo "${user_field}.type GAUGE" + echo "${user_field}.draw AREASTACK" + fi + done + exit +fi + +OTHER_PRINT="" +[ -z "$OTHER_FIELD" ] || OTHER_PRINT="print \"$(clean_fieldname "$OTHER_FIELD")\", others_sum;" + +ps ax -o user:42= -o cmd= | \ + awk -v USERS="$USERS" ' + !($2 ~ /^\[/) { NUSER[$1]=NUSER[$1]+1 } # filter away kernel threads + END { + others_sum = 0 + split(USERS, user_array) + for (user in NUSER) { + m = match(USERS,user) + if (m != 0) { + _user=user + gsub(/[-.]/,"_",_user); + print _user, (NUSER[user]) + } else + others_sum += NUSER[user] + } + '"$OTHER_PRINT"' + }' | while read -r user count; do + # apply fieldname cleanup + echo "$(clean_fieldname "$user").value $count" + done diff --git a/plugins/user/system_users b/plugins/user/system_users index ef760abd..81dc62ba 100755 --- a/plugins/user/system_users +++ b/plugins/user/system_users @@ -44,7 +44,7 @@ EOM exit 0;; esac -count=$(/usr/bin/who -q ) +count=$(/usr/bin/who -q ) printf "user.value %i\n" ${count#*=} exit 0 diff --git a/plugins/uucp/uustat b/plugins/uucp/uustat index 76bd6de7..638a3611 100755 --- a/plugins/uucp/uustat +++ b/plugins/uucp/uustat @@ -79,11 +79,10 @@ sub host_excluded if (@ARGV > 0 && $ARGV[0] eq 'autoconf') { if (-x $uustat) { print "yes\n"; - exit 0; } else { print "no\n"; - exit 1; } + exit 0; } if (@ARGV > 0 && $ARGV[0] eq 'config') { diff --git a/plugins/uwsgi/uwsgi_ b/plugins/uwsgi/uwsgi_ old mode 100644 new mode 100755 index c0fb48eb..09ef3327 --- a/plugins/uwsgi/uwsgi_ +++ b/plugins/uwsgi/uwsgi_ @@ -64,7 +64,7 @@ elif [ "$mode" = "average" ]; then ps awwwux | grep -i 'uwsgi' | grep -v grep | awk '{total_mem = $6 * 1024 + total_mem; total_proc++} END{printf("%d\n", total_mem / total_proc)}' exit 0 fi - + fi exit 0 diff --git a/plugins/varnish/README-varnish4.md b/plugins/varnish/README-varnish4.md index f8700e85..d77a9fcc 100644 --- a/plugins/varnish/README-varnish4.md +++ b/plugins/varnish/README-varnish4.md @@ -30,4 +30,4 @@ not in the path already. `env.name` is blank (undefined) by default and can be used to specify a -n name argument to varnish if multiple instances are running on the same -server. +server. 
diff --git a/plugins/varnish/varnish2_ b/plugins/varnish/varnish2_ index 0d3ec1b3..10d20a3c 100755 --- a/plugins/varnish/varnish2_ +++ b/plugins/varnish/varnish2_ @@ -1,6 +1,6 @@ #!/usr/bin/perl -# Original author: Bjrn Ruberg +# Original author: Bjørn Ruberg # Updated to Varnish 2.0/added instructions: Daniel Wirtz # # Installation (Debian): @@ -76,5 +76,5 @@ foreach $line (`varnishstat -1`) { } } } - + exit; diff --git a/plugins/varnish/varnish4_ b/plugins/varnish/varnish4_ old mode 100644 new mode 100755 index ec96075c..c20388bf --- a/plugins/varnish/varnish4_ +++ b/plugins/varnish/varnish4_ @@ -968,7 +968,7 @@ sub populate_stats my $parser = new XML::Parser(Handlers => {Start => \&xml_start_elem, End => \&xml_end_elem, Char => \&xml_characters} ); - + if ($varnishname) { $arg .= " -n $varnishname"; } @@ -1009,7 +1009,7 @@ sub print_dynamic } my $counter = $ASPECTS{$self}{'values'}{$name}{'counter'}; my $type = $ASPECTS{$self}{'values'}{$name}{'family'}; - + foreach my $key (keys %{$data{$type}}) { my $pname = normalize_name($type . "_" . $key . "_" . $counter); print $pname . $suffix . " "; @@ -1097,7 +1097,7 @@ sub get_config } next; } - + if (!print_if_exist(\%values,$value,'label')) { print "$value.label $data{$value}{'description'}\n"; } diff --git a/plugins/vbulletin/vbulletin4_users b/plugins/vbulletin/vbulletin4_users old mode 100644 new mode 100755 index ead4ebb8..b8ea1c12 --- a/plugins/vbulletin/vbulletin4_users +++ b/plugins/vbulletin/vbulletin4_users @@ -47,12 +47,12 @@ my $timeout = 30; if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { - if ($ret) - { + if ($ret) { print "no ($ret)\n"; - exit 1; + } else { + print "yes\n"; } - + exit 0; } if ( defined $ARGV[0] and $ARGV[0] eq "config" ) diff --git a/plugins/vbulletin/vbulletin_users b/plugins/vbulletin/vbulletin_users index 663eb3d0..27dd837d 100755 --- a/plugins/vbulletin/vbulletin_users +++ b/plugins/vbulletin/vbulletin_users @@ -47,12 +47,12 @@ my $timeout = 30; if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) { - if ($ret) - { + if ($ret) { print "no ($ret)\n"; - exit 1; + } else { + print "yes\n"; } - + exit 0; } if ( defined $ARGV[0] and $ARGV[0] eq "config" ) diff --git a/plugins/vdr/vdr_ b/plugins/vdr/vdr_ index f759a620..fb92443c 100755 --- a/plugins/vdr/vdr_ +++ b/plugins/vdr/vdr_ @@ -3,7 +3,7 @@ # Version 1.2 # - kein div 0 Fehler mehr wenn der Host nicht zu erreichen ist # - Serien Timer werden nun separat gezaehlt (anzahl pro Woche) -# - im Namen kann munic conform der hostname mit angegeben werden: vdr_localhost vdr_192.168.0.2, ... (localhost ist default) +# - im Namen kann Munin-konform der Hostname mit angegeben werden: vdr_localhost vdr_192.168.0.2, ... (localhost ist default) # - Timer werden nur ignoriert wenn sie 0(inaktiv) als Status haben # - Erkennung der neuen Aufzeichnungen passte nicht mehr # @@ -139,7 +139,7 @@ sub ermittelnTimer(){ $anzahlSerienMinuten+=$dauer*length($anzahl); } else { #print "keine Series"; - $anzahlTimer++; + $anzahlTimer++; $anzahlMinuten+=$dauer; } diff --git a/plugins/virtualbox/virtualbox_cpu_kernel b/plugins/virtualbox/virtualbox_cpu_kernel old mode 100644 new mode 100755 index 5f4db938..34551a9c --- a/plugins/virtualbox/virtualbox_cpu_kernel +++ b/plugins/virtualbox/virtualbox_cpu_kernel @@ -35,10 +35,10 @@ if [ "$1" = "config" ]; then echo 'graph_vlabel %' echo 'graph_scale no' echo 'graph_info This graph shows the percentage of processor time spent in kernel mode by the every single VM process.' 
- echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'graph_period second' - vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` + vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do + VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') echo "${VM_NAME_PRINT}_kernel.label ${VM_NAME}" done exit 0 @@ -47,8 +47,7 @@ fi vboxmanage metrics setup --period 5 --samples 3 sleep 5 -vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` - vboxmanage metrics query "${VM_NAME}" CPU/Load/Kernel | grep -E "^${VM_NAME}" | sed -r 's/^.*([0-9]+\.[0-9]+)%/'''${VM_NAME_PRINT}'''_kernel.value \1/' +vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do +VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') + vboxmanage metrics query "${VM_NAME}" CPU/Load/Kernel | grep -E "^${VM_NAME}" | sed -r 's/^.*([0-9]+\.[0-9]+)%/'"${VM_NAME_PRINT}"'_kernel.value \1/' done - diff --git a/plugins/virtualbox/virtualbox_cpu_user b/plugins/virtualbox/virtualbox_cpu_user old mode 100644 new mode 100755 index 8bd77674..a7e34e85 --- a/plugins/virtualbox/virtualbox_cpu_user +++ b/plugins/virtualbox/virtualbox_cpu_user @@ -35,10 +35,10 @@ if [ "$1" = "config" ]; then echo 'graph_vlabel %' echo 'graph_scale no' echo 'graph_info This graph shows the percentage of processor time spent in user mode by the every single VM process.' - echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'graph_period second' - vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` + vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do + VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') echo "${VM_NAME_PRINT}_user.label ${VM_NAME}" done exit 0 @@ -47,8 +47,7 @@ fi vboxmanage metrics setup --period 5 --samples 3 sleep 5 -vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` - vboxmanage metrics query "${VM_NAME}" CPU/Load/User | grep -E "^${VM_NAME}" | sed -r 's/^.*([0-9]+\.[0-9]+)%/'''${VM_NAME_PRINT}'''_user.value \1/' +vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do + VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') + vboxmanage metrics query "${VM_NAME}" CPU/Load/User | grep -E "^${VM_NAME}" | sed -r 's/^.*([0-9]+\.[0-9]+)%/'"${VM_NAME_PRINT}"'_user.value \1/' done - diff --git a/plugins/virtualbox/virtualbox_ram_usage b/plugins/virtualbox/virtualbox_ram_usage old mode 100644 new mode 100755 index e81db92c..a96d7f2e --- a/plugins/virtualbox/virtualbox_ram_usage +++ b/plugins/virtualbox/virtualbox_ram_usage @@ -34,19 +34,19 @@ if [ "$1" = "autoconf" ]; then fi if [ "$1" = "config" ]; then -RAMTOT=`free -b | grep Mem | awk '{print $2}'` + RAMTOT=$(free -b | grep Mem | awk '{print $2}') echo 'graph_title Memory usage of virtual machines' echo "graph_args --base 1024 -r --lower-limit 0 --upper-limit $RAMTOT --units-exponent 9" echo 'graph_vlabel GB' echo 'graph_info This graph shows the ram usage of every single VM process.' 
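# Illustrative note: vboxmanage reports RAM/Usage/Used in kB, while the upper
# limit above comes from "free -b" (bytes), so each VM field below gets a cdef
# in RRDtool reverse-polish notation, "<field>,1024,*", i.e. value * 1024
# (for example 524288 kB becomes 536870912 bytes).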
- echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'graph_period second' I=0 - vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` + vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do + VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') echo "${VM_NAME_PRINT}.label ${VM_NAME}" - echo "${VM_NAME_PRINT}.cdef ${VM_NAME_PRINT},1024,*" - if [ ${I} -eq 0 ]; then + echo "${VM_NAME_PRINT}.cdef ${VM_NAME_PRINT},1024,*" + if [ "$I" -eq 0 ]; then echo "${VM_NAME_PRINT}.draw AREA" else echo "${VM_NAME_PRINT}.draw STACK" @@ -59,8 +59,8 @@ fi vboxmanage metrics setup --period 5 --samples 3 sleep 5 -vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read VM_NAME; do - VM_NAME_PRINT=`echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g'` # s/[.]/_/g;s/[ ]/_/g;s/[-]/_/g - vboxmanage metrics query "${VM_NAME}" RAM/Usage/Used | grep -E "^${VM_NAME}" | sed -r 's/^.* ([0-9]+) kB$/'''${VM_NAME_PRINT}'''.value \1/' +vboxmanage list vms | sed -r 's/^\"(.*)\" \{.*\}$/\1/' | while read -r VM_NAME; do + VM_NAME_PRINT=$(echo -e "${VM_NAME}" | sed 's/[^A-Za-z0-9_]/_/g') # s/[.]/_/g;s/[ ]/_/g;s/[-]/_/g + vboxmanage metrics query "${VM_NAME}" RAM/Usage/Used | grep -E "^${VM_NAME}" | sed -r 's/^.* ([0-9]+) kB$/'"${VM_NAME_PRINT}"'.value \1/' done diff --git a/plugins/vmware/esx_ b/plugins/vmware/esx_ index 4c4ad4b4..53af9b85 100755 --- a/plugins/vmware/esx_ +++ b/plugins/vmware/esx_ @@ -327,7 +327,7 @@ foreach $host_name (@host_names) { group => "datastore", name => "uncommitted", value => $_->uncommitted, - counter => PerfCounterInfo->new(nameInfo => ElementDescription->new(label => "Uncomitted", summary => "Additional storage space, in bytes, potentially used by the virtual machine on this datastore.\n\nAdditional space may be needed for example when lazily allocated disks grow, or storage for swap is allocated when powering on the virtual machine.\n\nIf the virtual machine is running off delta disks (for example because a snapshot was taken), then only the potential growth of the currently used delta-disks is considered.\n\n")), + counter => PerfCounterInfo->new(nameInfo => ElementDescription->new(label => "Uncommitted", summary => "Additional storage space, in bytes, potentially used by the virtual machine on this datastore.\n\nAdditional space may be needed for example when lazily allocated disks grow, or storage for swap is allocated when powering on the virtual machine.\n\nIf the virtual machine is running off delta disks (for example because a snapshot was taken), then only the potential growth of the currently used delta-disks is considered.\n\n")), vm => $vmId, instance => $uuid, unit => "Bytes" }); diff --git a/plugins/vmware/esxcli_env_ b/plugins/vmware/esxcli_env_ index 6b6dd7e3..273a6440 100755 --- a/plugins/vmware/esxcli_env_ +++ b/plugins/vmware/esxcli_env_ @@ -9,7 +9,7 @@ Configuration variables (/etc/munin/plugin-conf.d/): host_name - Name of you ESXi host as defined in munin.conf timeout - Plugin specific timeout Plugin specific: - env.esxi_host - (REQUIRED) hostname/ip esxcli connect to + env.esxi_host - (REQUIRED) hostname/ip esxcli connect to env.esxi_user - (REQUIRED) ESXi username to connect env.esxi_password - (REQUIRED) password for user given above env.cache_file - path to cache file (we do not want two or more sequential reconnections to ESXi host) @@ -52,7 +52,7 @@ then fi ### PROCESSING ### -# Case insensetive 
regex +# Case insensitive regex shopt -s nocasematch # Determine sensor type diff --git a/plugins/vmware/esxi b/plugins/vmware/esxi index 7aef50a3..a4fd18e5 100755 --- a/plugins/vmware/esxi +++ b/plugins/vmware/esxi @@ -30,7 +30,7 @@ function get_vmlist () { vmlist_linenumber="$(echo "$summary" | $GREP -n "^Vmid")" vmlist_linenumber="${vmlist_linenumber/:*/}" - + vmlist="$(echo "$summary" | $TAIL -n $(($total_linenumber - $vmlist_linenumber)) | tr -s " ")" } @@ -74,7 +74,7 @@ function VMoverallCpuUsage () { temp="${c/*overallCpuUsage = /}" vmcpu[$n]="${temp/,*/}" if [ "${vmcpu[$n]}" == "" ]; then - vmcpu[$n]="0" + vmcpu[$n]="0" fi (( n++ )) done <" ]; then - vmmem[$n]="0" + vmmem[$n]="0" fi (( n++ )) done <" ]; then - vmmem[$n]="0" + vmmem[$n]="0" fi (( n++ )) done < To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide -under a CC0 waiver. This software is distributed without any warranty. +under a CC0 waiver. This software is distributed without any warranty. http://creativecommons.org/publicdomain/zero/1.0/ @@ -74,7 +74,7 @@ import pywbem NS = 'root/cimv2' sensor_data = { - 2 : {'prefix':'temp', 'title':'Temperatures', 'unit':'C'}, + 2 : {'prefix':'temp', 'title':'Temperatures', 'unit':'°C'}, 3 : {'prefix':'volt', 'title':'Voltages', 'unit':'Volts'}, 4 : {'prefix':'amp', 'title':'Current', 'unit':'Amps'}, 5 : {'prefix':'fan', 'title':'Fans', 'unit':'RPM'} diff --git a/plugins/vmware/fusion_ b/plugins/vmware/fusion_ index 6ae2eec6..75500297 100755 --- a/plugins/vmware/fusion_ +++ b/plugins/vmware/fusion_ @@ -63,37 +63,37 @@ my @lines=split(/\n/,$output); if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { my $lcount = 0; my $base_config = "graph_category virtualization\n"; - + if( $type eq "pcpu" ) { print $base_config; - print "graph_args --base 1000 -l 0 -u 100 -r\n"; - print "graph_scale no\n"; + print "graph_args --base 1000 -l 0 -u 100 -r\n"; + print "graph_scale no\n"; print "graph_title CPU usage in % per VM\n"; print "graph_vlabel % of CPU usage\n"; - print "graph_info The Graph shows the CPU usage in % per VM\n"; + print "graph_info The Graph shows the CPU usage in % per VM\n"; foreach my $line(@lines) { - if( $line =~ /(? 2 ){ $vm[$count]=$entry; $count++; } - } + } $vm[3] =~ s/\.vmx//; - my $cat = clean_vmname($vm[3]); + my $cat = clean_vmname($vm[3]); if( $cat =~ /(? 0 ){ print $cat,"_pcpu.draw STACK\n"; } else { print $cat,"_pcpu.draw AREA\n"; } - $lcount++; + $lcount++; print $cat,"_pcpu.label $vm[3]\n"; print $cat,"_pcpu.type GAUGE\n"; - } + } } } } @@ -104,64 +104,64 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { print "graph_scale no\n"; print "graph_title Memory usage in % per VM\n"; print "graph_vlabel % of Memory usage\n"; - print "graph_info The Graph shows the Memory usage in % per VM\n"; + print "graph_info The Graph shows the Memory usage in % per VM\n"; foreach my $line(@lines) { - if( $line =~ /(? 2 ){ - $vm[$count]=$entry; - $count++; - } - } - $vm[3] =~ s/\.vmx//; - my $cat = clean_vmname($vm[3]); - if( $cat =~ /(? 
0 ){ - print $cat,"_pmem.draw STACK\n"; - } else { - print $cat,"_pmem.draw AREA\n"; - } - $lcount++; - print $cat,"_pmem.label $vm[3]\n"; - print $cat,"_pmem.type GAUGE\n"; - } - } - } - } - - if( $type eq "mem" ) { - print $base_config; - print "graph_args --base 1024 -r --lower-limit 0\n"; - print "graph_title absolute Memory usage per VM\n"; - print "graph_vlabel Memory usage\n"; - print "graph_info The Graph shows the absolute Memory usage per VM\n"; - foreach my $line(@lines) { - if( $line =~ /(? 2 ){ $vm[$count]=$entry; $count++; } } - $vm[3] = clean_vmname($vm[3]); - if( $vm[3] =~ /(? 0 ){ + print $cat,"_pmem.draw STACK\n"; + } else { + print $cat,"_pmem.draw AREA\n"; + } + $lcount++; + print $cat,"_pmem.label $vm[3]\n"; + print $cat,"_pmem.type GAUGE\n"; + } + } + } + } + + if( $type eq "mem" ) { + print $base_config; + print "graph_args --base 1024 -r --lower-limit 0\n"; + print "graph_title absolute Memory usage per VM\n"; + print "graph_vlabel Memory usage\n"; + print "graph_info The Graph shows the absolute Memory usage per VM\n"; + foreach my $line(@lines) { + if( $line =~ /(? 2 ){ + $vm[$count]=$entry; + $count++; + } + } + $vm[3] = clean_vmname($vm[3]); + if( $vm[3] =~ /(? 0 ){ print "$vm[3]_mem.draw STACK\n"; } else { print "$vm[3]_mem.draw AREA\n"; } print "$vm[3]_mem.label $vm[3]\n"; - print "$vm[3]_mem.type GAUGE\n"; - $lcount++; - } + print "$vm[3]_mem.type GAUGE\n"; + $lcount++; + } } - } + } } } else { @@ -169,7 +169,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { if( $line =~ /(? 2 ){ $vm[$count]=$entry; @@ -177,7 +177,7 @@ if ( exists $ARGV[0] and $ARGV[0] eq "config" ) { } } $vm[3] = clean_vmname($vm[3]); - if( $vm[3] =~ /(? $_ENV['IceScret'] if (count($argv)==1) { do_count(); - } + } -switch ($argv[1]) +switch ($argv[1]) { case 'autoconf': do_autoconf(); break; - + case 'config': do_config(); break; - + default: do_count(); break; @@ -103,7 +103,7 @@ exit(1); function IceConnect() { global $ICE, $host, $port, $IceProfile, $IceSecret; - try + try { Ice_loadProfile($IceProfile); $iceproxy = $ICE->stringToProxy("Meta:tcp -h $host -p $port"); @@ -146,15 +146,15 @@ function do_config_header() function do_config_data() { global $ICE, $IceSecret; - - try + + try { $metaServer = IceConnect(); $AdefaultConf = $metaServer->getDefaultConf(); - + $AvirtualServer = $metaServer->getAllServers(); - foreach ($AvirtualServer as $numserver=>$s) + foreach ($AvirtualServer as $numserver=>$s) { $serverid = $s->ice_context($IceSecret)->id(); $servename = $s->ice_context($IceSecret)->getConf( 'registername'); @@ -189,14 +189,14 @@ function do_count() $totalMaxUsers="0"; $totalConnectedUsers="0"; - try + try { $metaServer = IceConnect(); $AdefaultConf = $metaServer->getDefaultConf(); - + $AvirtualServer = $metaServer->getAllServers(); - foreach ($AvirtualServer as $numserver=>$s) + foreach ($AvirtualServer as $numserver=>$s) { $maxusers = $s->ice_context($IceSecret)->getConf( 'users' ); if (!$maxusers) $maxusers = $AdefaultConf['users']; @@ -215,7 +215,7 @@ function do_count() fwrite(STDOUT, "vserver_".$serverid."_maxusers.value ".$maxusers."\n"); fwrite(STDOUT, "vserver_".$serverid."_online.value ".$connectedUsers."\n"); } - + fwrite(STDOUT, "total_maxusers.value ".$totalMaxUsers."\n"); fwrite(STDOUT, "total_online.value ".$totalConnectedUsers."\n"); exit(0); @@ -227,4 +227,4 @@ function do_count() } } -?> \ No newline at end of file +?> diff --git a/plugins/voip/murmur_users b/plugins/voip/murmur_users index 9da3df2a..b3a64a6f 100755 --- a/plugins/voip/murmur_users +++ 
b/plugins/voip/murmur_users @@ -7,7 +7,7 @@ error_reporting( E_ALL &!E_NOTICE); //to avoid of the crap generation ///////////////////////////////////////////////////////////////////////////////////////////////////// Murmur users online grahpher -ver 0.2alpha 2008.12.02, 20:32 +ver 0.2alpha 2008.12.02, 20:32 author _KaszpiR_ kaszpir at gmail dot com code is under GPL @@ -18,12 +18,12 @@ Requirements: Notice: - script allows the usage of the 'config' and 'autoconf' parameters during startup, make fure you edt config section before running it -- $limit - number of lines to tail from the lgo file, better keep it below 5000 for lower cpu load, +- $limit - number of lines to tail from the lgo file, better keep it below 5000 for lower cpu load, additionally on busy servers you can keep it really low, suggested 3x maximum number of users online -- tested on +- tested on PHP 5.2.6-3 with Suhosin-Patch 0.9.6.2 (cli) (built: Aug 21 2008 17:02:32) murmur 1.1.4 precompiled binaries from sourceforge net, all running under debian etch -- this is not the best way to get users connected to the murmur server, maybe in the beginningn of the 2009 gonna make another script +- this is not the best way to get users connected to the murmur server, maybe in the beginning of the 2009 I will make another script Known issues and limitations: - counts all users on the server not respecting different server instances @@ -60,7 +60,7 @@ if(isset($argv[1]) && $argv[1] == "autoconf") { fwrite(STDOUT, "Yes\n"); } - else + else { fwrite(STDOUT, "No\n"); fwrite(STDERR, "check if '$logfile' exists and it is allowed to be read by munin user group\n"); @@ -82,7 +82,7 @@ if(isset($argv[1]) && $argv[1] == "config") fwrite(STDOUT, "murmur.label Users on server\n"); fwrite(STDOUT, "murmur.type GAUGE\n"); return 0; - + }else { echo "check if '$logfile' exists and it is allowed to be read by munin user group\n"; return 1; @@ -95,7 +95,7 @@ if(isset($argv[1]) && $argv[1] == "config") // do the magic if(!$limit || ($limit >=5000 ) || $limit <= 0) $limit = 5000; $out = shell_exec("tail -n ".$limit." \"".$logfile."\""); -$fp = split("\n",$out); +$fp = explode("\n",$out); if(!count(@$fp)) { fwrite(STDOUT, "0\n"); return 1; @@ -123,17 +123,17 @@ for($i=count($fp);$i>(count($fp)-$limit);--$i) if(!array_key_exists($nick,$seen)){ if( strpos($msg," Connection closed")!==FALSE - || strpos($msg," Tiemout")!==FALSE + || strpos($msg," Timeout")!==FALSE ){ - $seen[$nick]['online'] = 0; + $seen[$nick]['online'] = 0; $offline+=1; } else { - $seen[$nick]['online'] = 1; + $seen[$nick]['online'] = 1; $online+=1; - + } } } @@ -147,4 +147,4 @@ return 0; ///////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////// //end of file -?> \ No newline at end of file +?> diff --git a/plugins/voip/murmurice_host_port_id_description b/plugins/voip/murmurice_host_port_id_description index 2deeb8f1..fb8c8de2 100755 --- a/plugins/voip/murmurice_host_port_id_description +++ b/plugins/voip/murmurice_host_port_id_description @@ -7,7 +7,7 @@ error_reporting( E_ALL & ~E_NOTICE ); //to avoid of the crap generation ///////////////////////////////////////////////////////////////////////////////////////////////////// Murmur users online grapher PHP using ICE -ver 0.4 2011.06.10, 15:44 +ver 0.4 2011.06.10, 15:44 author _KaszpiR_ kaszpir at gmail dot com code is under GPL @@ -21,7 +21,7 @@ Requirements: Configuration: 1. 
link creation -create a symlink to thie file in the following format: +create a symlink to the file in the following format: murmurice_ip_port_sid_description where ip/port correspond to the ip and port on which the mumble ice interface listens to sid is the server id we want to query (mumble can run multiple servers ) @@ -31,8 +31,8 @@ By default script tries to connect 127.0.0.1:6502 and query server id 1 2. ice profile configuration This is not needed with ice 3.4.1 - Scroll down in this file and change - $ice_profile = 'Murmur'; + Scroll down in this file and change + $ice_profile = 'Murmur'; to the profile that is installed on the server, this is required if you have multiple Ice profiles for various applicaitions. @@ -43,7 +43,7 @@ channels number = number of existing channels on server (with temoprary ones) players number = number of all connected users on server registered number = number of connected and registered users on server unregistered number = number of connected and not registered users on server -chanlinks number = linked chanels +chanlinks number = linked channels error number = this is set to 1 when there is error with communicatind with server via ice On any error script exits with status 1, otherwise it is 0. @@ -82,7 +82,7 @@ $ice_profile = 'Murmur'; -// Define STDIN in case if it is not already defined by PHP for some reason +// Define STDIN in case if it is not already defined by PHP for some reason if(!defined("STDIN")) { define("STDIN", fopen('php://stdin','r')); @@ -108,7 +108,7 @@ if(isset($argv[1]) && $argv[1] == "autoconf") if(FALSE) { } - else + else { fwrite(STDOUT, "No\n"); fwrite(STDERR, "symlink ".$argv[0]." to somethilg like ".$argv[0]."_127.0.0.1_6502_1_description_here \n"); @@ -123,18 +123,18 @@ if(isset($argv[1]) && $argv[1] == "config") { if(TRUE) { - // yea dirty hack + // yea dirty hack echo "graph_title Mumble Users".($desc?" on ".str_replace("_"," ",$desc):"")."\n"; echo "graph_vlabel Connected Users\n"; echo "graph_category VoIP\n"; echo "graph_info This graph shows the number of connected users on a murmur server\n"; echo "channels.label Chans\n"; echo "players.label Total Users\n"; - echo "registered.label Registerd\n"; + echo "registered.label Registered\n"; echo "unregistered.label Unregistered\n"; echo "chanlinks.label Linked chans\n"; echo "error.label Server status error\n"; - + $arr=array("channels","players","registered","unregistered","chanlinks","error"); foreach($arr as $field){ echo "".$field.".draw LINE1\n"; @@ -143,7 +143,7 @@ if(isset($argv[1]) && $argv[1] == "config") return 0; - + }else { echo "RTFM\n"; return 1; @@ -174,7 +174,7 @@ try { $initData->properties = Ice_createProperties(); $initData->properties->setProperty("Ice.MessageSizeMax", "65536"); $ICE = Ice_initialize($initData); - } + } $base = $ICE->stringToProxy("Meta:tcp -h ".$ip." 
-p ".$port); @@ -222,4 +222,4 @@ echo "unregistered.value ".$online_noreg."\n"; echo "chanlinks.value ".$links."\n"; echo "error.value 0\n"; return 0; -?> \ No newline at end of file +?> diff --git a/plugins/voip/murmurice_host_port_id_description_avg b/plugins/voip/murmurice_host_port_id_description_avg index 600232c0..4d0ddfee 100755 --- a/plugins/voip/murmurice_host_port_id_description_avg +++ b/plugins/voip/murmurice_host_port_id_description_avg @@ -7,7 +7,7 @@ error_reporting( E_ALL & ~E_NOTICE ); //to avoid of the crap generation ///////////////////////////////////////////////////////////////////////////////////////////////////// Murmur users online grapher PHP using ICE, shows averages -ver 0.2 2011.06.10, 15:44 +ver 0.2 2011.06.10, 15:44 author _KaszpiR_ kaszpir at gmail dot com code is under GPL @@ -31,8 +31,8 @@ By default script tries to connect 127.0.0.1:6502 and query server id 1 2. ice profile configuration This is not needed with ice 3.4.1 - Scroll down in this file and change - $ice_profile = 'Murmur'; + Scroll down in this file and change + $ice_profile = 'Murmur'; to the profile that is installed on the server, this is required if you have multiple Ice profiles for various applicaitions. @@ -80,7 +80,7 @@ Todo: $ice_profile = 'Murmur'; -// Define STDIN in case if it is not already defined by PHP for some reason +// Define STDIN in case if it is not already defined by PHP for some reason if(!defined("STDIN")) { define("STDIN", fopen('php://stdin','r')); @@ -106,7 +106,7 @@ if(isset($argv[1]) && $argv[1] == "autoconf") if(FALSE) { } - else + else { fwrite(STDOUT, "No\n"); fwrite(STDERR, "symlink ".$argv[0]." to somethilg like ".$argv[0]."_127.0.0.1_6502_1_description_here \n"); @@ -121,7 +121,7 @@ if(isset($argv[1]) && $argv[1] == "config") { if(TRUE) { - // yea dirty hack + // yea dirty hack echo "graph_title Mumble Average Users per chan ".($desc?" on ".str_replace("_"," ",$desc):"")."\n"; echo "graph_vlabel Number\n"; echo "graph_category VoIP\n"; @@ -133,7 +133,7 @@ if(isset($argv[1]) && $argv[1] == "config") echo "selfdeaf.label Self Deafen\n"; echo "avgperchan.label Users per chan\n"; echo "error.label Server status error\n"; - + $arr=array("mute","deaf","suppress","selfmute","selfdeaf","avgperchan","error"); foreach($arr as $field){ echo "".$field.".draw LINE1\n"; @@ -142,7 +142,7 @@ if(isset($argv[1]) && $argv[1] == "config") return 0; - + }else { echo "RTFM\n"; return 1; @@ -177,7 +177,7 @@ try { $initData->properties = Ice_createProperties(); $initData->properties->setProperty("Ice.MessageSizeMax", "65536"); $ICE = Ice_initialize($initData); - } + } $base = $ICE->stringToProxy("Meta:tcp -h ".$ip." -p ".$port); @@ -238,4 +238,4 @@ echo "selfdeaf.value ".$selfdeaf."\n"; echo "avgperchan.value ".$avg."\n"; echo "error.value 0\n"; return 0; -?> \ No newline at end of file +?> diff --git a/plugins/voip/zapchans b/plugins/voip/zapchans index c0304f72..aecbd11f 100755 --- a/plugins/voip/zapchans +++ b/plugins/voip/zapchans @@ -34,7 +34,7 @@ if [ "$1" = "config" ]; then echo 'graph_args --upper-limit 22 -l 0' echo 'graph_scale no' echo 'graph_vlabel active connections' - echo 'graph_category VOIP' + echo 'graph_category voip' echo 'graph_period second' echo 'graph_info This graph shows the number of PRI lines in use.' echo 'calls.label calls' @@ -47,6 +47,6 @@ fi # cron, and dumps its output to a separate file for this plugin to read later on. Letting # Munin plugins run as root is a security hole. 
-zap=`asterisk -rx 'zap show channels' |grep -v "[[:digit:]]*[[:space:]] local" |grep -vc "Chan Extension"` +zap=$(asterisk -rx 'zap show channels' |grep -v "[[:digit:]]*[[:space:]] local" |grep -vc "Chan Extension") echo "calls.value $zap" diff --git a/plugins/voldemort/voldemort b/plugins/voldemort/voldemort old mode 100644 new mode 100755 index c11f9469..4c279f84 --- a/plugins/voldemort/voldemort +++ b/plugins/voldemort/voldemort @@ -6,39 +6,37 @@ require 'rubygems' require 'jmx4r' - -#%# family=auto -#%# capabilities=autoconf +# %# family=auto +# %# capabilities=autoconf # friendly name => result of listPerfStatsKeys via JMX keys = { - "Throughput" => { "vlabel" => "rate", - "type" => "ABSOLUTE", - "values" => ["all_operation_throughput","delete_throughput", "get_all_throughput", "get_throughput", "put_throughput"] - }, - "Number of Calls" => { "vlabel" => "counts", - "type" => "COUNTER", - "values" => ["number_of_calls_to_delete","number_of_calls_to_get", "number_of_calls_to_get_all", - "number_of_calls_to_put", "number_of_exceptions"] - } + 'Throughput' => { 'vlabel' => 'rate', + 'type' => 'ABSOLUTE', + 'values' => %w[all_operation_throughput delete_throughput get_all_throughput get_throughput put_throughput] }, + 'Number of Calls' => { 'vlabel' => 'counts', + 'type' => 'COUNTER', + 'values' => %w[number_of_calls_to_delete number_of_calls_to_get number_of_calls_to_get_all + number_of_calls_to_put number_of_exceptions] } } -if ARGV[0] == "config" +case ARGV[0] +when 'config' keys.each_key do |key| - puts "multigraph voldemort_#{key.gsub(" ", "_")}" + puts "multigraph voldemort_#{key.gsub(' ', '_')}" puts "graph_title #{key}" - puts "graph_scale no" - puts "graph_category search" + puts 'graph_scale no' + puts 'graph_category search' puts "graph_vlabel #{keys[key]['vlabel']}" - for data in keys[key]['values'] do + keys[key]['values'].each do |data| puts "#{data}.type #{keys[key]['type']}" - puts "#{data}.label #{data.gsub("_", " ")}" + puts "#{data}.label #{data.gsub('_', ' ')}" end puts end exit 0 -elsif ARGV[0] == "autoconf" - puts "yes" +when 'autoconf' + puts 'yes' exit 0 else @@ -49,16 +47,18 @@ else # Make the platform MBean server able to work with JBossAS MBeans # JAVA_OPTS="$JAVA_OPTS -Djavax.management.builder.initial=org.jboss.system.server.jmx.MBeanServerBuilderImpl" # JBOSS_CLASSPATH="/opt/webtrends/jboss/bin/mbean" - JMX::MBean.establish_connection :port => 5400 - vs = JMX::MBean.find_by_name "voldemort.store.stats.aggregate:type=aggregate-perf" + JMX::MBean.establish_connection port: 5400 + vs = JMX::MBean.find_by_name 'voldemort.store.stats.aggregate:type=aggregate-perf' keys.each_key do |key| - puts "multigraph voldemort_#{key.gsub(" ", "_")}" + puts "multigraph voldemort_#{key.gsub(' ', '_')}" for data in keys[key]['values'] do - puts "#{data}.value #{begin vs.send("#{data}") rescue 0 end}" + puts "#{data}.value #{begin begin + vs.send(data.to_s) + rescue StandardError + 0 + end end}" end puts end end - - diff --git a/plugins/vpn/openvpn_as_mtime b/plugins/vpn/openvpn_as_mtime index 7dbf90ac..eb8ce58a 100755 --- a/plugins/vpn/openvpn_as_mtime +++ b/plugins/vpn/openvpn_as_mtime @@ -36,7 +36,7 @@ GPLv2 if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -62,7 +62,7 @@ if [ "$1" = "config" ]; then echo "time.label Users" echo "time.type GAUGE" echo "time.min 0" - + exit 0 fi @@ -78,5 +78,5 @@ fi # Total is total time between number of users between 60 for give it in minutes echo "time.value $(($(($SUM / $TOTU)) / 60))" - + diff --git 
a/plugins/vpn/openvpn_as_traffic b/plugins/vpn/openvpn_as_traffic index d59e7fee..718ff7f1 100755 --- a/plugins/vpn/openvpn_as_traffic +++ b/plugins/vpn/openvpn_as_traffic @@ -40,7 +40,7 @@ SUM2=0 NOW=`date +%s` if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi diff --git a/plugins/vpn/openvpn_as_ttime b/plugins/vpn/openvpn_as_ttime index 06809a92..2f514cba 100755 --- a/plugins/vpn/openvpn_as_ttime +++ b/plugins/vpn/openvpn_as_ttime @@ -36,7 +36,7 @@ GPLv2 if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -73,7 +73,7 @@ if [ "$1" = "config" ]; then echo "$NAME.min 0" COUNT=$((COUNT + 1)) done - + exit 0 fi diff --git a/plugins/vpn/openvpn_as_users b/plugins/vpn/openvpn_as_users index 3d655cb6..1fe54eb5 100755 --- a/plugins/vpn/openvpn_as_users +++ b/plugins/vpn/openvpn_as_users @@ -33,7 +33,7 @@ GPLv2 if [ "$1" = "autoconf" ]; then - echo yes + echo yes exit 0 fi @@ -59,9 +59,9 @@ if [ "$1" = "config" ]; then echo "users.type GAUGE" echo "users.draw AREA" echo "users.min 0" - - echo "users.warning $((VAL2 * $WARN / 100))" - echo "users.critical $VAL2" + + echo "users.warning $((VAL2 * $WARN / 100))" + echo "users.critical $VAL2" echo "limit.label Limit" @@ -76,5 +76,5 @@ fi echo "users.value $VAL1" echo "limit.value $VAL2" - + diff --git a/plugins/vpn/openvpn_multi b/plugins/vpn/openvpn_multi index bafcc230..2ddbcc82 100755 --- a/plugins/vpn/openvpn_multi +++ b/plugins/vpn/openvpn_multi @@ -83,11 +83,10 @@ sub config { sub autoconf { if (-e $statusfile) { print "yes\n"; - exit 0; } else { print "no\n"; - exit 1; } + exit 0; } sub report { diff --git a/plugins/vserver/vserver_jiffies b/plugins/vserver/vserver_jiffies index caf196c7..f57c41e4 100755 --- a/plugins/vserver/vserver_jiffies +++ b/plugins/vserver/vserver_jiffies @@ -27,7 +27,6 @@ VSERVERS=(${=vservers}) STRIPDOMAIN="$stripdomain" INFO=($(sed 's/.*:\t//' /proc/virtual/info 2>/dev/null || echo '')) -KCIN="$[ 16#${INFO[2]} ]"; NAMELOC="nsproxy" @@ -57,7 +56,7 @@ if [[ "$1" = "config" ]]; then echo 'graph_vlabel jiffies used per ${graph_period}' echo 'graph_info Shows jiffies used on each vserver.' - for i in $XIDS[@]; do + for i in $XIDS[@]; do LABEL=$(sed -n '/NodeName/s/^NodeName:[[:space:]]*//p' /proc/virtual/$i/$NAMELOC) LABEL=${LABEL%$STRIPDOMAIN} NAME=${LABEL//./_} @@ -75,7 +74,7 @@ if [[ "$1" = "config" ]]; then exit 0 fi -for i in $XIDS[@]; do +for i in $XIDS[@]; do LABEL=$(sed -n '/NodeName/s/^NodeName:[[:space:]]*//p' /proc/virtual/$i/$NAMELOC) LABEL=${LABEL%$STRIPDOMAIN} NAME=${LABEL//./_} diff --git a/plugins/vserver/vserver_limit_hits b/plugins/vserver/vserver_limit_hits index d4a690cc..d896ac1e 100755 --- a/plugins/vserver/vserver_limit_hits +++ b/plugins/vserver/vserver_limit_hits @@ -23,14 +23,14 @@ # # NOTE: If no configuration variables are set, the defaults will be used -# Example /etc/munin/plugin-conf.d/munin-node +# Example /etc/munin/plugin-conf.d/munin-node # # The first group monitors the vservers named "vserver1 vserver2 # vserver3 vserver4" and looks to see if the resource limit has been # breached, if so it sends a message to nagios via send_nsca, and # sends an email to notify that this has happened. # -# The second monitors the vservers "vserver5 vserver6 vserver7" and +# The second monitors the vservers "vserver5 vserver6 vserver7" and # has no limit notifications turned on. 
# # The third monitors all vservers on the system, in one graph, and it has @@ -73,7 +73,7 @@ # version 0.5 - 2008 Apr 12 - Chris Wilson # - Changed to display limit hits instead of resource usage # - Adapt to latest vserver kernel (lack of some variables in /proc/virtual) -# Note that your vserver names may change if the contents of +# Note that your vserver names may change if the contents of # /etc/vservers/* do not match the nodenames. Also you must specify # the vservers variable with context IDs (XIDs) rather than names. @@ -91,7 +91,7 @@ if [ "$1" = "config" ]; then echo "graph_vlabel $resource limit hits" echo 'graph_category virtualization' echo "graph_info Shows number of hits on $resource limits by each vserver.'" - + for vserver_xid in $vservers ; do longname=`/usr/sbin/vuname --xid $vserver_xid NODENAME | cut -f2` name=`echo $longname | cut -d. -f1` diff --git a/plugins/vserver/vserver_limits b/plugins/vserver/vserver_limits index ea4317c5..f724eed0 100755 --- a/plugins/vserver/vserver_limits +++ b/plugins/vserver/vserver_limits @@ -23,7 +23,7 @@ # # NOTE: If no configuration variables are set, the defaults will be used -# Example /etc/munin/plugin-conf.d/munin-node +# Example /etc/munin/plugin-conf.d/munin-node # # [vserver_limits_RSS] # user root @@ -68,7 +68,7 @@ if [ "$1" = "config" ]; then echo "graph_vlabel $resource limits" echo 'graph_category virtualization' echo "graph_info Shows current $resource limits for each vserver.'" - + for vserver_xid in $vservers ; do longname=`/usr/sbin/vuname --xid $vserver_xid NODENAME | cut -f2` name=`echo $longname | cut -d. -f1` diff --git a/plugins/weather/buienradar_ b/plugins/weather/buienradar_ index 663b3cd0..686067dd 100755 --- a/plugins/weather/buienradar_ +++ b/plugins/weather/buienradar_ @@ -6,12 +6,12 @@ # BuienRadar: http://www.buienradar.nl # # -# This program is free software: you can redistribute it and/or modify it under the terms of the -# GNU General Public License as published by the Free Software Foundation, either version 3 of +# This program is free software: you can redistribute it and/or modify it under the terms of the +# GNU General Public License as published by the Free Software Foundation, either version 3 of # the License, or (at your option) any later version. # -# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License along with this program. @@ -41,9 +41,9 @@ # env.rain = 'yes' # # -# +# ### Installation Example -# Below is a example to monitor station Venlo (6391). Codes for the station can be seen at the end of +# Below is a example to monitor station Venlo (6391). Codes for the station can be seen at the end of # this source or can be show with the command './buienradar_ stations' # # wget -O /usr/lib/munin/plugins/buienradar_ http://exchange.munin-monitoring.org/plugins/buienradar_/version/1/download @@ -101,7 +101,7 @@ if($stationName eq "") { # Darn, no station name found. This means we don't provided a correct id. die "Error:\n" ."You have provided an invalid station code. 
(".$station.") Please use a correct one.\n\n" - .print_stations(); + .print_stations(); } @@ -152,7 +152,7 @@ if($RAIN eq "yes") { print "rain.value ".$doc->findnodes($stationNode."/rege # Station codes # Static, I know I could do this dynamically, but since they don't change regularly I choose this way -# so I could present two nice rows whitout having to write a buncgh of code... :P +# so I could present two nice rows without having to write a buncgh of code... :P sub print_stations { return "Available station codes, updated last on April 4th 2011\n\n" ." [6391] Venlo (Station Arcen) [6275] Arnhem (Station Arnhem)\n" diff --git a/plugins/weather/example-graphs/wunderground_STATION-week.png b/plugins/weather/example-graphs/wunderground_STATION-week.png new file mode 100644 index 00000000..e978abb2 Binary files /dev/null and b/plugins/weather/example-graphs/wunderground_STATION-week.png differ diff --git a/plugins/weather/humidity-sensor b/plugins/weather/humidity-sensor index 3c96b8f7..40d6b8e5 100755 --- a/plugins/weather/humidity-sensor +++ b/plugins/weather/humidity-sensor @@ -29,11 +29,10 @@ if (! eval "require LWP::UserAgent;") if (defined $ARGV[0] and $ARGV[0] eq "autoconf") { if (defined $ret) { print "no ($ret)\n"; - exit 1; } else { print "yes\n"; - exit 0; } + exit 0; } my $datasource = "http://weather.noaa.gov/pub/data/observations/metar/decoded/"; diff --git a/plugins/weather/novra_s300 b/plugins/weather/novra_s300 index ca20cbc7..e82fa9be 100755 --- a/plugins/weather/novra_s300 +++ b/plugins/weather/novra_s300 @@ -16,7 +16,7 @@ # @email icedown@gmail.com # # Usage: -# Copy this to your plugin folder (default: /usr/share/munin/plugins) +# Copy this to your plugin folder (default: /usr/share/munin/plugins) # Edit is file, replacing CMCS, IP, and PW with your values # Make a symlink to your active plugins folder (default: /etc/munin/plugins) # Finally run munin-node-config and restart munin-node @@ -59,15 +59,15 @@ if (defined($ARGV[0]) and ($ARGV[0] eq 'autoconf')) { if($status =~ m/Login unsuccessful/) { print "No (Invalid receiver details)\n"; exit(0); - } + } print "yes\n"; exit(0); } - + print "no (Cannot locate CMCS)\n"; exit(0); - - + + } require XML::Simple; diff --git a/plugins/weather/openweather_ b/plugins/weather/openweather_ index 45b9610d..ffec4ea9 100755 --- a/plugins/weather/openweather_ +++ b/plugins/weather/openweather_ @@ -18,12 +18,11 @@ # [openweather_*] # env.apikey XYZ -query_string=$(printf '%s' "${0#*_}" | tr '_' '=') +location=$(printf '%s' "${0#*_}" | tr '_' '=') +query_string="${location}&appid=${apikey}" plugin_name=$( basename $0 ) OWAPI=$( curl -s "http://api.openweathermap.org/data/2.5/weather?mode=xml&${query_string}") -# API returns temp in K, we have to convert it in C -# &units=metric would change that ;-) KELVIN_BIAS=273 CITY=$( expr "$OWAPI" : '.*\request(HTTP::Request->new('GET',$url)); diff --git a/plugins/weather/weather_ b/plugins/weather/weather_ index d91580b7..d4807063 100755 --- a/plugins/weather/weather_ +++ b/plugins/weather/weather_ @@ -3,59 +3,59 @@ import os import re import sys -import urllib +from urllib.request import urlopen url = 'http://www.weather.com/weather/today/%s' -re_tmp = re.compile('realTemp: "(\d+)"') -re_hum = re.compile('relativeHumidity: "(\d+)"') -re_loc = re.compile('locName: "([\w ]+)"') +re_tmp = re.compile(r'realTemp: "(\d+)"') +re_hum = re.compile(r'relativeHumidity: "(\d+)"') +re_loc = re.compile(r'locName: "([\w ]+)"') -#code = sys.argv[0][(sys.argv[0].rfind('_') + 1):] +# code = 
sys.argv[0][(sys.argv[0].rfind('_') + 1):] code = os.environ.get('code', sys.argv[0][(sys.argv[0].rfind('_') + 1):]) -if code == None: sys.exit(1) - -if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - print "yes" +if not code: + sys.exit(1) +elif len(sys.argv) == 2 and sys.argv[1] == "autoconf": + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": - u = urllib.urlopen(url % code) - txt = u.read() - u.close() + u = urlopen(url % code) + txt = u.read() + u.close() - LOC_list = re_loc.findall(txt) - - if len(LOC_list): - LOC = LOC_list[0] - else: - LOC = "Unknown" + LOC_list = re_loc.findall(txt) - print 'graph_title Weather in %s' % LOC - print 'graph_vlabel Temperature and Humidity' - print 'graph_category sensors' + if len(LOC_list): + LOC = LOC_list[0] + else: + LOC = "Unknown" - print 'temperature.label Temperature' - print 'humidity.label Humidity' + print('graph_title Weather in %s' % LOC) + print('graph_vlabel Temperature and Humidity') + print('graph_category sensors') - print 'graph_args --base 1000 -l 0' + print('temperature.label Temperature') + print('humidity.label Humidity') + + print('graph_args --base 1000 -l 0') else: - u = urllib.urlopen(url % code) - txt = u.read() - u.close() + u = urlopen(url % code) + txt = u.read() + u.close() - TMP_F_list = re_tmp.findall(txt) - HUM_list = re_hum.findall(txt) + TMP_F_list = re_tmp.findall(txt) + HUM_list = re_hum.findall(txt) - if len(HUM_list): - HUM = HUM_list[0] - else: - sys.exit(1) + if len(HUM_list): + HUM = HUM_list[0] + else: + sys.exit(1) - if len(TMP_F_list): - TMP_F = TMP_F_list[0] - TMP_C = (int(TMP_F) - 32) * 5/9 - else: - sys.exit(1) + if len(TMP_F_list): + TMP_F = TMP_F_list[0] + TMP_C = (int(TMP_F) - 32) * 5 / 9 + else: + sys.exit(1) - print 'temperature.value %s' % TMP_C - print 'humidity.value %s' % HUM + print('temperature.value %s' % TMP_C) + print('humidity.value %s' % HUM) diff --git a/plugins/weather/weather_press_ b/plugins/weather/weather_press_ index b5dc8623..12cc9936 100755 --- a/plugins/weather/weather_press_ +++ b/plugins/weather/weather_press_ @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ munin US NOAA weather plugin (http://tgftp.nws.noaa.gov) @@ -13,38 +13,32 @@ Linux users might need to adjust the shebang. 
""" import sys -import urllib +from urllib.request import urlopen import re url = 'http://tgftp.nws.noaa.gov/data/observations/metar/decoded/%s.TXT' -re_hPa = re.compile('Pressure.*\((\d+) hPa\)') +re_hpa = re.compile(r'Pressure.*\((\d+) hPa\)') -code = sys.argv[0][(sys.argv[0].rfind('_')+1):] -if code == None: sys.exit(1) - -if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - - print "yes" - +code = sys.argv[0][(sys.argv[0].rfind('_') + 1):] +if not code: + sys.exit(1) +elif len(sys.argv) == 2 and sys.argv[1] == "autoconf": + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": + print('graph_title Atmospheric pressure at code %s' % code) + print('graph_vlabel Pressure in hPa') + print('graph_category sensors') - print 'graph_title Atmospheric pressure at code %s' % code - print 'graph_vlabel Pressure in hPa' - print 'graph_category sensors' - - print 'pressure.label Pressure' - print 'pressure.type GAUGE' - print 'graph_args --base 1000 -l 850 -u 1050 --rigid' - print 'graph_scale no' - + print('pressure.label Pressure') + print('pressure.type GAUGE') + print('graph_args --base 1000 -l 850 -u 1050 --rigid') + print('graph_scale no') else: + u = urlopen(url % code) + txt = u.read() + u.close() - u = urllib.urlopen(url % code) - txt = u.read() - u.close() - - hPa = re_hPa.findall(txt)[0] - - print 'pressure.value %s' % hPa + hpa = re_hpa.findall(txt)[0] + print('pressure.value %s' % hpa) diff --git a/plugins/weather/weather_temp_ b/plugins/weather/weather_temp_ index eee22379..2d28b436 100755 --- a/plugins/weather/weather_temp_ +++ b/plugins/weather/weather_temp_ @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ munin US NOAA weather plugin (http://tgftp.nws.noaa.gov) @@ -13,41 +13,34 @@ Linux users might need to adjust the shebang. """ import sys -import urllib +from urllib.request import urlopen import re url = 'http://tgftp.nws.noaa.gov/data/observations/metar/decoded/%s.TXT' -re_C = re.compile('Temperature:.*\((-?\d+\.?\d?) C\)') -re_DewC = re.compile('Dew.*\((-?\d+\.?\d?) C\)') +re_C = re.compile(r'Temperature:.*\((-?\d+\.?\d?) C\)') +re_DewC = re.compile(r'Dew.*\((-?\d+\.?\d?) 
C\)') -code = sys.argv[0][(sys.argv[0].rfind('_')+1):] -if code == None: sys.exit(1) +code = sys.argv[0][(sys.argv[0].rfind('_') + 1):] -if len(sys.argv) == 2 and sys.argv[1] == "autoconf": - - print "yes" +if not code: + sys.exit(1) +elif len(sys.argv) == 2 and sys.argv[1] == "autoconf": + print("yes") elif len(sys.argv) == 2 and sys.argv[1] == "config": - - print 'graph_title Temperature and Dew Point at code %s' % code - print 'graph_vlabel Temperature and Dew Point in C' - print 'graph_category sensors' - - print 'temperature.label Temperature' - print 'dewpoint.label Dew Point' - - print 'graph_args --base 1000 -l 0' - + print('graph_title Temperature and Dew Point at code %s' % code) + print('graph_vlabel Temperature and Dew Point in C') + print('graph_category sensors') + print('temperature.label Temperature') + print('dewpoint.label Dew Point') + print('graph_args --base 1000 -l 0') else: + u = urlopen(url % code) + txt = u.read() + u.close() - u = urllib.urlopen(url % code) - txt = u.read() - u.close() - - C = re_C.findall(txt)[0] - DewC = re_DewC.findall(txt)[0] - - print 'temperature.value %s' % C - print 'dewpoint.value %s' % DewC - + C = re_C.findall(txt)[0] + DewC = re_DewC.findall(txt)[0] + print('temperature.value %s' % C) + print('dewpoint.value %s' % DewC) diff --git a/plugins/weather/wfrog b/plugins/weather/wfrog index 4fb5bd79..3c9d1243 100755 --- a/plugins/weather/wfrog +++ b/plugins/weather/wfrog @@ -19,8 +19,8 @@ use Data::Dumper; # 1. Install wfrog, get it up running with your weather station # 2. Locate your wfrog.csv file (wfrog creates after 10 mins) # 3. cd /etc/munin/plugins/ -# 4. ln -s /usr/share/munin/plugins/wfrog wfrog_temp -# 4. ln -s /usr/share/munin/plugins/wfrog wfrog_pressure +# 4. ln -s /usr/share/munin/plugins/wfrog wfrog_temp +# 4. ln -s /usr/share/munin/plugins/wfrog wfrog_pressure # 5. etc.. # 6. reload munin-node ;-) @@ -35,7 +35,7 @@ my %CONFIG = ( my $interesting; if ($0 =~ m#wfrog_(\w+)#) { - $interesting = $1; + $interesting = $1; } else { diff --git a/plugins/weather/wunderground_ b/plugins/weather/wunderground_ new file mode 100755 index 00000000..c81f0d87 --- /dev/null +++ b/plugins/weather/wunderground_ @@ -0,0 +1,361 @@ +#!/bin/sh +# -*- sh -*- + +: << =cut + +=head1 NAME + +wunderground - Plugin to monitor weather stations through Weather Underground + +The precipitation rate is recalculated from the daily cumulative sum +precipTotal, as a DERIVE value, rather than using the immediate precipRate as a +GAUGE. This allows to have more correct aggregates for the +weekly/monstly/yearly graphs, particularly in case of missing values. + +However, to work around the limitation that DERIVE only supports integers, the +decimal point is also shifted two places right (*100) from the reported value, +so even small values can be represented. The value is then rescaled to cancel +this shift (/100), and expressed per hour rather than second (*3600), as it is +the usual unit for precipitation. + +=head1 CONFIGURATION + + [wunderground] + env.api_key 6532d6454b8aa370768e63d6ba5a832e # this is the default; it seems to be what the website uses + env.station_id KCASANFR1708 + env.units metric # optional, this is the default + env.base_url # optional, default to https://api.weather.com/v2/pws/observations/current + env.connect_timeout 1 # optional, amount to wait for requests, in seconds + +Alternatively, the station_id can be encoded in the name of the symlink as +wunderground_STATIONID (e.g., wundergound_KCASANFR1708). 
This allows to monitor +multiple stations at once. The configuration can then omit the station_id (it +will be ignored if present), and only one section can be used for all instances +of the plugin. + + [wunderground_*] + env.api_key 6532d6454b8aa370768e63d6ba5a832e # this is the default; it seems to be what the website uses + env.units metric # optional, this is the default + env.base_url # optional, default to https://api.weather.com/v2/pws/observations/current + env.connect_timeout 1 # optional, amount to wait for requests, in seconds + +=head1 AUTHOR + +Olivier Mehani + +Copyright (C) 2020 Olivier Mehani + +=head1 LICENSE + +SPDX-License-Identifier: GPL-3.0-or-later + +=head1 MAGIC MARKERS + + #%# family=manual + +=cut + +# Example output +# +# curl 'https://api.weather.com/v2/pws/observations/current?apiKey=6532d6454b8aa370768e63d6ba5a832e&stationId=KCASANFR1708&numericPrecision=decimal&format=json&units=m' +#{"observations":[{"stationID":"KCASANFR1708","obsTimeUtc":"2020-06-15T06:30:54Z","obsTimeLocal":"2020-06-14 23:30:54","neighborhood":"Van Ness - Civic Center","softwareType":"Weather logger V3.0.8","country":"US","solarRadiation":null,"lon":-122.423,"realtimeFrequency":null,"epoch":1592202654,"lat":37.788,"uv":null,"winddir":null,"humidity":90.0,"qcStatus":1,"imperial":{"temp":58.6,"heatIndex":58.6,"dewpt":55.8,"windChill":null,"windSpeed":null,"windGust":null,"pressure":29.85,"precipRate":null,"precipTotal":null,"elev":187.0}}]} + +set -eu + +# shellcheck disable=SC1090 +. "${MUNIN_LIBDIR}/plugins/plugin.sh" + +if [ "${MUNIN_DEBUG:-0}" = 1 ]; then + set -x +fi + +PLUGIN_NAME="$(basename "${0}")" +STATION_ID="$(echo "${PLUGIN_NAME}" | sed 's/.*_//')" +# Use the station ID from the config only if the plugin doesn't specify one +STATION_ID=${STATION_ID:-${station_id:-KCASANFR1708}} + +API_KEY=${api_key:-6532d6454b8aa370768e63d6ba5a832e} +UNITS=${units:-metric} +BASE_URL=${base_url:-https://api.weather.com/v2/pws/observations/current} +CONNECT_TIMEOUT=${connect_timeout:-1} + +UNITS_ARG='&units=m' +DISTANCE_UNIT='m' +PRESSURE_UNIT='hPa' +PRECIPITATION_UNIT='mm' +SPEED_UNIT='km/h' +TEMP_UNIT='°C' +# https://en.wikipedia.org/wiki/Wind_chill#/media/File:Windchill_effect_en.svg +WIND_CHILL_CAUTION=-35: +WIND_CHILL_DANGER=-60: +# https://en.wikipedia.org/wiki/Heat_index#Table_of_values +HEAT_INDEX_CAUTION=27 +HEAT_INDEX_EXTREME_CAUTION=33 +HEAT_INDEX_DANGER=41 +HEAT_INDEX_EXTREME_DANGER=54 +if [ "${UNITS}" = "imperial" ]; then + UNITS_ARG='&units=e' + DISTANCE_UNIT='ft' + PRESSURE_UNIT='in' + PRECIPITATION_UNIT='in' + SPEED_UNIT='mph' + TEMP_UNIT='°F' + WIND_CHILL_CAUTION=-31: + WIND_CHILL_DANGER=-76:w + HEAT_INDEX_CAUTION=80 + HEAT_INDEX_EXTREME_CAUTION=91 + HEAT_INDEX_DANGER=105 + HEAT_INDEX_EXTREME_DANGER=130 +fi +API_URL="${BASE_URL}?apiKey=${API_KEY}&stationId=${STATION_ID}&numericPrecision=decimal&format=json${UNITS_ARG}" + +check_deps() { + for CMD in curl jq; do + if ! 
command -v "${CMD}" >/dev/null; then + echo "no (${CMD} not found)" + fi + done +} + +CURL_ARGS="-s --connect-timeout ${CONNECT_TIMEOUT}" +fetch() { + # shellcheck disable=SC2086 + curl -f ${CURL_ARGS} "$@" \ + || { echo "error fetching ${*}" >&2; false; } +} + +config() { + local STATION_INFO="in \(.neighborhood), \(.country) reported by station \(.stationID) (\(.lon), \(.lat), \(.${UNITS}.elev) m) at \(.obsTimeLocal) (\(.obsTimeUtc))" + fetch "${API_URL}" | jq -r ".observations[0] + | @text \" +multigraph wunderground_${STATION_ID} +graph_title Weather in \(.neighborhood) +graph_info Weather ${STATION_INFO} +graph_category weather +graph_vlabel Temperature / UV Index / Precipitation +temp.label Temperature [${TEMP_UNIT}] +windChill.label Wind chill [${TEMP_UNIT}] +heatIndex.label Heat index [$TEMP_UNIT] +uv.label UV index +precipRate.draw AREA +precipRate.label Precipitation rate [${PRECIPITATION_UNIT} per hour] + +multigraph wunderground_${STATION_ID}.air_humidity +graph_title Humidity in \(.neighborhood) +graph_info Humidity ${STATION_INFO} +graph_category weather +graph_args -l 0 --upper-limit 100 +graph_vlabel Humidity [%] +humidity.label Humidity +humidity.min 0 +humidity.max 100 + +multigraph wunderground_${STATION_ID}.location +graph_title Location of \(.stationID) +graph_info Track geographic coordinates of station \(.stationID); last: \(.lon), \(.lat), \(.${UNITS}.elev) ${DISTANCE_UNIT} at \(.obsTimeLocal) (\(.obsTimeUtc)) +graph_category weather +graph_scale no +graph_vlabel lon/lat [°] / elevation [${DISTANCE_UNIT}] +lon.label Longitude [°] +lat.label Latitude [°] +elev.label Elevation [${DISTANCE_UNIT}] + +multigraph wunderground_${STATION_ID}.precipitation +graph_title Precipitation in \(.neighborhood) +graph_info Precipitation ${STATION_INFO} +graph_category weather +graph_args -l 0 --base 1000 +graph_vlabel Precipitation [${PRECIPITATION_UNIT} per hour] +precipRate.label Precipitation rate +avgRate.label Average precipitation rate +avgRate.type DERIVE +avgRate.draw AREA +avgRate.min 0 +avgRate.cdef avgRate,36,* + +multigraph wunderground_${STATION_ID}.air_pressure +graph_title Pressure in \(.neighborhood) +graph_info Pressure ${STATION_INFO} +graph_category weather +graph_scale no +graph_vlabel Pressure [${PRESSURE_UNIT}] +pressure.label Pressure + +multigraph wunderground_${STATION_ID}.solar_radiation +graph_title Solar radiation in \(.neighborhood) +graph_info Solar radiation ${STATION_INFO} +graph_category weather +graph_args -l 0 --base 1000 +graph_vlabel Solar radiation [W/m^2] +solarRadiation.label Solar radiation + +multigraph wunderground_${STATION_ID}.temperature +temp.label Temperature +graph_title Temperature in \(.neighborhood) +graph_info Temperature ${STATION_INFO} +graph_category weather +graph_vlabel Temperature [${TEMP_UNIT}] +temp.label Temperature + +dewpt.label Dew point +dewpt.info Temperature to which air must be cooled to become saturated with water vapor. When cooled further, the airborne water vapor will condense to form liquid water (dew). When air cools to its dew point through contact with a surface that is colder than the air, water will condense on the surface. When the temperature is below the freezing point of water, the dew point is called the frost point, as frost is formed rather than dew. + +windChill.label Wind chill +windChill.info Represent the lowering of body temperature due to the passing-flow of lower-temperature air. Wind chill numbers are always lower than the air temperature for values where the formula is valid. 
When the apparent temperature is higher than the air temperature, the heat index is used instead. +windChill.warning ${WIND_CHILL_CAUTION} +windChill.critical ${WIND_CHILL_DANGER} +windChillCaution.label Wind chill Caution +windChillCaution.info Danger of frostbite +windChillCaution.colour 5358f6 +windChillCaution.line ${WIND_CHILL_CAUTION} +windChillDanger.label Wind chill Danger +windChillDanger.info Great danger of frostbite +windChillDanger.colour 5c1ff5 +windChillDanger.line ${WIND_CHILL_DANGER} + +heatIndex.label Heat index +heatIndex.info Index that combines air temperature and relative humidity, in shaded areas, to posit a human-perceived equivalent temperature, as how hot it would feel if the humidity were some other value in the shade. +heatIndex.warning ${HEAT_INDEX_EXTREME_CAUTION} +heatIndex.critical ${HEAT_INDEX_DANGER} +heatIndexCaution.label Heat index Caution +heatIndexCaution.info Fatigue is possible with prolonged exposure and activity. Continuing activity could result in heat cramps. +heatIndexCaution.colour ffff66 +heatIndexCaution.line ${HEAT_INDEX_CAUTION} +heatIndexECaution.label Heat index Extreme Caution +heatIndexECaution.info Heat cramps and heat exhaustion are possible. Continuing activity could result in heat stroke. +heatIndexECaution.colour ffd700 +heatIndexECaution.line ${HEAT_INDEX_EXTREME_CAUTION} +heatIndexDanger.label Heat index Danger +heatIndexDanger.info Heat cramps and heat exhaustion are likely; heat stroke is probable with continued activity. +heatIndexDanger.colour ff8c00 +heatIndexDanger.line ${HEAT_INDEX_DANGER} +heatIndexEDanger.label Heat index Extreme Danger +heatIndexEDanger.info Heat stroke is imminent. +heatIndexEDanger.colour ff0000 +heatIndexEDanger.line ${HEAT_INDEX_EXTREME_DANGER} + +multigraph wunderground_${STATION_ID}.uv_index +graph_title UV Index in \(.neighborhood) +graph_info UV index ${STATION_INFO} +graph_category weather +graph_args -l 0 +graph_vlabel UV index +uv.label UV index +uv.min 0 +uv.warning 5 +uv.critical 7 +moderate.label Moderate +moderate.info Stay in shade near midday when the Sun is strongest. If outdoors, wear Sun protective clothing, a wide-brimmed hat, and UV-blocking sunglasses. Generously apply broad spectrum SPF 30+ sunscreen every 1.5 hours, even on cloudy days, and after swimming or sweating. Bright surfaces, such as sand, water, and snow, will increase UV exposure. +moderate.colour fff300 +moderate.line 3 +high.label High +high.info Reduce time in the Sun between 10 a.m. and 4 p.m. If outdoors, seek shade and wear Sun protective clothing, a wide-brimmed hat, and UV-blocking sunglasses. Generously apply broad spectrum SPF 30+ sunscreen every 1.5 hours, even on cloudy days, and after swimming or sweating. Bright surfaces, such as sand, water, and snow, will increase UV exposure. +high.colour f18b00 +high.line 6 +veryhigh.label Very high +veryhigh.info Minimize Sun exposure between 10 a.m. and 4 p.m. If outdoors, seek shade and wear Sun protective clothing, a wide-brimmed hat, and UV-blocking sunglasses. Generously apply broad spectrum SPF 30+ sunscreen every 1.5 hours, even on cloudy days, and after swimming or sweating. Bright surfaces, such as sand, water, and snow, will increase UV exposure. +veryhigh.colour e53210 +veryhigh.line 8 +extreme.label Extreme +extreme.info Try to avoid Sun exposure between 10 a.m. and 4 p.m. If outdoors, seek shade and wear Sun protective clothing, a wide-brimmed hat, and UV-blocking sunglasses. 
Generously apply broad spectrum SPF 30+ sunscreen every 1.5 hours, even on cloudy days, and after swimming or sweating. Bright surfaces, such as sand, water, and snow, will increase UV exposure. +extreme.colour b567a4 +extreme.line 11 + +multigraph wunderground_${STATION_ID}.wind +graph_title Wind Speed in \(.neighborhood) +graph_info Wind speed and gusts ${STATION_INFO} +graph_category weather +graph_args -l 0 --base 1000 +graph_vlabel Wind speed [${SPEED_UNIT}] +windSpeed.label Wind speed +windGust.label Wind gusts + +multigraph wunderground_${STATION_ID}.wind_direction +graph_title Wind Direction in \(.neighborhood) +graph_info Wind direction ${STATION_INFO} +graph_category weather +graph_args --base 1000 -l 0 --upper-limit 360 +graph_vlabel Wind [°] +winddir.label Wind origin +winddir.min 0 +winddir.max 360 +winddir.line 0 +north.label North +north.colour COLOUR0 +north.line 360 +east.label East +east.colour COLOUR1 +east.line 90 +south.label South +south.colour COLOUR2 +south.line 180 +west.label West +west.colour COLOUR9 +west.line 270 +\"" +} + +get_data() { + fetch "${API_URL}" | jq -r ".observations[0] + | @text \" +multigraph wunderground_${STATION_ID} +temp.value \(.${UNITS}.temp) +windChill.value \(.${UNITS}.windChill) +heatIndex.value \(.${UNITS}.heatIndex) +uv.value \(.uv) +precipRate.value \(.${UNITS}.precipRate) + +multigraph wunderground_${STATION_ID}.air_humidity +humidity.value \(.humidity) + +multigraph wunderground_${STATION_ID}.location +lon.value \(.lon) +lat.value \(.lat) +elev.value \(.${UNITS}.elev) + +multigraph wunderground_${STATION_ID}.precipitation +precipRate.value \(.${UNITS}.precipRate) +avgRate.value \(.${UNITS}.precipTotal*100 | round) +avgRate.extinfo Immediate precipitation: \(.${UNITS}.precipRate) ${PRECIPITATION_UNIT}/h; Daily total: \(.${UNITS}.precipTotal) ${PRECIPITATION_UNIT} + +multigraph wunderground_${STATION_ID}.air_pressure +pressure.value \(.${UNITS}.pressure) + +multigraph wunderground_${STATION_ID}.solar_radiation +solarRadiation.value \(.solarRadiation) + +multigraph wunderground_${STATION_ID}.temperature +temp.value \(.${UNITS}.temp) +dewpt.value \(.${UNITS}.dewpt) +windChill.value \(.${UNITS}.windChill) +heatIndex.value \(.${UNITS}.heatIndex) + +multigraph wunderground_${STATION_ID}.uv_index +uv.value \(.uv) + +multigraph wunderground_${STATION_ID}.wind +windSpeed.value \(.${UNITS}.windSpeed) +windGust.value \(.${UNITS}.windGust) + +multigraph wunderground_${STATION_ID}.wind_direction +winddir.value \(.winddir) +\"" | sed 's/ null$/U/' +} + +main () { + check_deps + + case ${1:-} in + config) + config + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then + get_data + fi + ;; + *) + get_data + ;; + esac +} + +main "${1:-}" diff --git a/plugins/websphere/webspherelogin b/plugins/websphere/webspherelogin index e1744c55..b4b57cfc 100755 --- a/plugins/websphere/webspherelogin +++ b/plugins/websphere/webspherelogin @@ -7,12 +7,12 @@ SITEANALYZERLOG="/opt/WebSphere/SiteAnalyzer/sa.log" if [ "$1" = "config" ]; then - echo 'graph_title WebSphere Portal online users' - echo 'graph_category appserver' -# echo 'graph_order total actual' - echo 'graph_order actual' + echo 'graph_title WebSphere Portal online users' + echo 'graph_category appserver' +# echo 'graph_order total actual' + echo 'graph_order actual' echo 'graph_vlabel Users' - echo 'graph_scale no' + echo 'graph_scale no' # echo 'total.label Total logged' # echo 'total.draw LINE2' # echo 'total.colour DDDDDD' diff --git a/plugins/network/ath9k_ b/plugins/wifi/ath9k_ similarity index 57% 
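The *100 / cdef bookkeeping in the wunderground_ plugin above can be made concrete: avgRate is stored as a DERIVE of round(precipTotal * 100), i.e. hundredths of the precipitation unit per second, and "avgRate.cdef avgRate,36,*" rescales that back to units per hour, since 3600 s/h divided by the factor of 100 gives 36. A stand-alone sketch of the same arithmetic (the sample values and the 5-minute interval are invented, metric units assumed):

    #!/bin/sh
    # Sketch only: the rescaling applied to precipTotal by the wunderground_ plugin.
    # Two hypothetical readings of the daily cumulative precipitation, 300 s apart.
    prev_total=1.20
    curr_total=1.35
    interval=300

    # What the plugin hands to munin: integer hundredths of a millimetre (DERIVE input).
    prev_value=$(awk -v t="$prev_total" 'BEGIN { printf "%d", t * 100 }')
    curr_value=$(awk -v t="$curr_total" 'BEGIN { printf "%d", t * 100 }')

    # DERIVE yields (hundredths of a mm) per second; "avgRate,36,*" turns that
    # into mm per hour, because 3600 / 100 = 36.
    awk -v a="$prev_value" -v b="$curr_value" -v dt="$interval" \
        'BEGIN { printf "%.2f mm/h\n", (b - a) / dt * 36 }'
    # -> 1.80 mm/h, i.e. 0.15 mm of rain in 5 minutes.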
rename from plugins/network/ath9k_ rename to plugins/wifi/ath9k_ index 405b93c2..69876886 100755 --- a/plugins/network/ath9k_ +++ b/plugins/wifi/ath9k_ @@ -5,6 +5,7 @@ # * rate control statistics ("rc_stats") # * events (dropped, transmitted, beacon loss, ...) # * traffic (packets, bytes) +# * DFS events (processed patterns, approved signals) # # All data is collected for each separate station (in case of multiple # connected peers). Combined graphs are provided as a summary. @@ -15,7 +16,19 @@ # * micropython # # -# Copyright (C) 2015 Lars Kruse +# The following graphs are generated for each physical ath9k interface: +# phy0_wifi0_traffic +# phy0_wifi0_traffic.station0 +# ... +# phy0_wifi0_events +# phy0_wifi0_events.station0 +# ... +# phy0_wifi0_rc_stats +# phy0_wifi0_rc_stats.station0 +# ... +# +# +# Copyright (C) 2015-2018 Lars Kruse # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -31,8 +44,8 @@ # along with this program. If not, see . # # Magic markers -#%# capabilities=autoconf suggest -#%# family=auto +# #%# capabilities=autoconf suggest +# #%# family=auto """true" # ****************** Interpreter Selection *************** @@ -41,14 +54,16 @@ # # This "execution hack" works as follows: # * the script is executed by busybox ash or another shell -# * the above line (three quotes before and one quote after 'true') evaluates differently for shell and python: -# * shell: run "true" (i.e. nothing happens) -# * python: ignore everything up to the next three consecutive quotes +# * the above line (three quotes before and one quote after 'true') evaluates differently for +# shell and python: +# * shell: run "true" (i.e. nothing happens) +# * python: ignore everything up to the next three consecutive quotes # Thus we may place shell code here that will take care for selecting an interpreter. -# prefer micropython if it is available - otherwise fall back to any python (2 or 3) -if which micropython >/dev/null; then - /usr/bin/micropython "$0" "$@" +# prefer micropython if it is available - otherwise fall back to python 3 +MICROPYTHON_BIN=$(which micropython || true) +if [ -n "$MICROPYTHON_BIN" ]; then + "$MICROPYTHON_BIN" "$0" "$@" else python3 "$0" "$@" fi @@ -59,26 +74,20 @@ exit $? 
true < 0 def _get_up_down_pair(unit, key_up, key_down, factor=None, divider=None, use_negative=True): @@ -407,8 +544,9 @@ def get_scope(): name_prefix = "ath9k_" if called_name.startswith(name_prefix): scope = called_name[len(name_prefix):] - if not scope in PLUGIN_SCOPES: - print_error("Invalid scope requested: {0} (expected: {1})".format(scope, PLUGIN_SCOPES)) + if scope not in PLUGIN_SCOPES: + print_error("Invalid scope requested: {0} (expected: {1})" + .format(scope, PLUGIN_SCOPES)) sys.exit(2) else: print_error("Invalid filename - failed to discover plugin scope") @@ -422,23 +560,37 @@ def print_error(message): sys.stderr.write(message + linesep) +def do_fetch(ath9k): + for item in ath9k.get_values(get_scope()): + print(item) + + +def do_config(ath9k): + for item in ath9k.get_config(get_scope()): + print(item) + + if __name__ == "__main__": ath9k = Ath9kDriver(SYS_BASE_DIR, GRAPH_BASE_NAME) # parse arguments if len(sys.argv) > 1: - if sys.argv[1]=="config": - for item in ath9k.get_config(get_scope()): - print(item) + if sys.argv[1] == "config": + do_config(ath9k) + if os.getenv("MUNIN_CAP_DIRTYCONFIG") == "1": + do_fetch(ath9k) sys.exit(0) elif sys.argv[1] == "autoconf": - if os.path.exists(SYS_BASE_PATH): + if os.path.exists(SYS_BASE_DIR): print('yes') else: - print('no') + print('no (missing ath9k driver sysfs directory: {})'.format(SYS_BASE_DIR)) sys.exit(0) elif sys.argv[1] == "suggest": - for scope in PLUGIN_SCOPES: - print(scope) + if ath9k.has_devices(): + for scope in PLUGIN_SCOPES: + # skip the "dfs_events" scope if there is not DFS support + if (scope != "dfs_events") or ath9k.has_dfs_support(): + print(scope) sys.exit(0) elif sys.argv[1] == "version": print_error('olsrd Munin plugin, version %s' % plugin_version) @@ -451,9 +603,7 @@ if __name__ == "__main__": print_error("Unknown argument") sys.exit(1) - # output values - for item in ath9k.get_values(get_scope()): - print(item) + do_fetch(ath9k) # final marker for shell / python hybrid script (see "Interpreter Selection") EOF = True diff --git a/plugins/wifi/example-graphs/ath9k_-stats_phy0_dfs_events_year.png b/plugins/wifi/example-graphs/ath9k_-stats_phy0_dfs_events_year.png new file mode 100644 index 00000000..eb2fbe4f Binary files /dev/null and b/plugins/wifi/example-graphs/ath9k_-stats_phy0_dfs_events_year.png differ diff --git a/plugins/wifi/example-graphs/ath9k_-stats_phy0_phy0_wlan0_events_day.png b/plugins/wifi/example-graphs/ath9k_-stats_phy0_phy0_wlan0_events_day.png new file mode 100644 index 00000000..a32f0455 Binary files /dev/null and b/plugins/wifi/example-graphs/ath9k_-stats_phy0_phy0_wlan0_events_day.png differ diff --git a/plugins/wifi/example-graphs/ath9k_-stats_rcstats_day.png b/plugins/wifi/example-graphs/ath9k_-stats_rcstats_day.png new file mode 100644 index 00000000..a9f03400 Binary files /dev/null and b/plugins/wifi/example-graphs/ath9k_-stats_rcstats_day.png differ diff --git a/plugins/wifi/example-graphs/wireless_channel_active_-week.png b/plugins/wifi/example-graphs/wireless_channel_active_-week.png new file mode 100644 index 00000000..472fe858 Binary files /dev/null and b/plugins/wifi/example-graphs/wireless_channel_active_-week.png differ diff --git a/plugins/wifi/example-graphs/wireless_channel_active_-year.png b/plugins/wifi/example-graphs/wireless_channel_active_-year.png new file mode 100644 index 00000000..3f40a721 Binary files /dev/null and b/plugins/wifi/example-graphs/wireless_channel_active_-year.png differ diff --git a/plugins/wifi/example-graphs/wireless_channel_occupation_-day.png 
b/plugins/wifi/example-graphs/wireless_channel_occupation_-day.png new file mode 100644 index 00000000..3ec54227 Binary files /dev/null and b/plugins/wifi/example-graphs/wireless_channel_occupation_-day.png differ diff --git a/plugins/wifi/example-graphs/wireless_signal_noise_-day.png b/plugins/wifi/example-graphs/wireless_signal_noise_-day.png new file mode 100644 index 00000000..6c4c46b8 Binary files /dev/null and b/plugins/wifi/example-graphs/wireless_signal_noise_-day.png differ diff --git a/plugins/wifi/example-graphs/wireless_signal_ranges_-day.png b/plugins/wifi/example-graphs/wireless_signal_ranges_-day.png new file mode 100644 index 00000000..830c710f Binary files /dev/null and b/plugins/wifi/example-graphs/wireless_signal_ranges_-day.png differ diff --git a/plugins/wifi/wifi_signal b/plugins/wifi/wifi_signal index 7892cbe5..a06a8bd9 100755 --- a/plugins/wifi/wifi_signal +++ b/plugins/wifi/wifi_signal @@ -6,7 +6,7 @@ # License: GPL v. 2 # #%# family=auto -#%# capabilitoes=autoconf +#%# capabilities=autoconf PNWL=/proc/net/wireless @@ -22,9 +22,9 @@ do_config () { echo "graph_title WiFi signal and noise" echo "graph_args --base 1000 -u 0" echo "graph_vlabel dB" - echo "graph_category network" - echo "graph_info This graph shows the noise and singal levels of your WiFi devices" - + echo "graph_category wireless" + echo "graph_info This graph shows the noise and signal levels of your WiFi devices" + awk -F'[ :]*' '/:/ { print $2"_noise.label Noise "$2; print $2"_signal.label Signal "$2; @@ -32,23 +32,16 @@ do_config () { } do_autoconf () { - if [ ! -f $PNWL ] ; then - echo "no (no $PNWL)" - exit 1 + if [ ! -f "$PNWL" ]; then + echo "no (missing file '$PNWL')" + elif [ ! -r "$PNWL" ]; then + echo "no (cannot read file '$PNWL')" + elif grep -qs : "$PNWL"; then + echo yes + else + echo "no (no devices in $PNWL)" fi - - if [ ! -r $PNWL ] ; then - echo "no (could not read $PNWL)" - exit 1 - fi - - if grep -qs : $PNWL ; then - echo yes - exit 0 - fi - - echo "no (no devices in $PNWL)" - exit 1 + exit 0 } case $1 in diff --git a/plugins/wifi/wifi_signal_noise_ b/plugins/wifi/wifi_signal_noise_ deleted file mode 100755 index e4c2b946..00000000 --- a/plugins/wifi/wifi_signal_noise_ +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/sh -# -# Show current signal strength and noise for all connected peers of wifi devices. -# This plugin is suitable for wifi interfaces with a stable selection of peers -# (e.g. infrastructure). 
-# Author: Lars Kruse, devel@sumpfralle.de -# License: GPL v3 or later -# -# Requirements: -# * "iwinfo" tool (alternatively: fall back to "iw" - with incomplete data) -# * root privileges (for "iw" and "iwinfo") -# -# Magic markers -#%# capabilities=autoconf suggest -#%# family=auto - - -set -eu - - -# prefer "iwinfo" for information retrieval, if it is available -if which iwinfo >/dev/null; then - # "iwinfo" has a stable output format but is only available on openwrt - get_wifi_interfaces() { iwinfo | grep "^[a-zA-Z]" | awk '{print $1}'; } - # return MAC of peer and the signal strength - get_wifi_peers() { iwinfo "$1" assoclist | grep "^[0-9a-fA-F]" | awk '{print $1,$2}'; } - # the noise should be the same for all peers - get_wifi_noise() { iwinfo "$1" info | sed -n 's/^.* Noise: \([0-9-]\+\).*/\1/p'; } -else - # "iw" is available everywhere - but its output format is not recommended for non-humans - get_wifi_interfaces() { iw dev | awk '{ if ($1 == "Interface") print $2; }'; } - get_wifi_peers() { iw dev wlan0 station dump \ - | awk '{ if ($1 == "Station") mac=$2; if (($1 == "signal") && ($2 == "avg:")) print mac,$3}'; } - # TODO: there seems to be no way to retrieve the noise level via "iw" - get_wifi_noise() { echo; } -fi - - -clean_fieldname() { - echo "$1" | sed 's/^\([^A-Za-z_]\)/_\1/; s/[^A-Za-z0-9_]/_/g' -} - - -get_ip_for_mac() { - local ip - ip=$(arp -n | grep -iw "$1$" | awk '{print $1}' | sort | head -1) - [ -n "$ip" ] && echo "$ip" && return 0 - # no IP found - return MAC instead - echo "$1" -} - - -get_wifi_device_from_suffix() { - local suffix - local real_dev - # pick the part after the basename of the real file - suffix=$(basename "$0" | sed "s/^$(basename "$(readlink "$0")")//") - for real_dev in $(get_wifi_interfaces); do - [ "$suffix" != "$(clean_fieldname "$real_dev")" ] || echo "$real_dev" - done | head -1 -} - - -ACTION="${1:-}" - -case "$ACTION" in - config) - wifi=$(get_wifi_device_from_suffix) - echo "graph_title Wireless signal quality - $wifi" - echo "graph_args --upper-limit 0" - echo "graph_vlabel Signal and noise [dBm]" - echo "graph_category network" - echo "graph_info This graph shows the signal and noise for all wifi peers" - echo "noise.label Noise floor" - echo "noise.draw LINE" - # sub graphs for all peers - get_wifi_peers "$wifi" | while read mac signal; do - fieldname=$(clean_fieldname "peer_${mac}") - peer=$(get_ip_for_mac "$mac") - echo "signal_${fieldname}.label $peer" - echo "signal_${fieldname}.draw LINE" - done - ;; - autoconf) - [ -z "$(get_wifi_interfaces)" ] && echo "no (no wifi interfaces found)" && exit 1 - echo "yes" - ;; - suggest) - get_wifi_interfaces | while read ifname; do - clean_fieldname "$ifname" - done - ;; - "") - wifi=$(get_wifi_device_from_suffix) - peer_data=$(get_wifi_peers "$wifi") - echo "$peer_data" | while read mac signal; do - # ignore empty datasets - [ -z "$signal" ] && continue - fieldname=$(clean_fieldname "peer_${mac}") - echo "signal_${fieldname}.value $signal" - done - echo "noise.value $(get_wifi_noise "$wifi")" - ;; - *) - echo >&2 "Invalid action (valid: config)" - echo >&2 - ;; -esac diff --git a/plugins/wifi/wireless_channel_active_ b/plugins/wifi/wireless_channel_active_ new file mode 100755 index 00000000..82ae5551 --- /dev/null +++ b/plugins/wifi/wireless_channel_active_ @@ -0,0 +1,139 @@ +#!/bin/sh + +: << =cut + +=head1 NAME + +wireless_channel_active - Show currently used channel of wifi enabled devices + +=head1 APPLICABLE SYSTEMS + +Information is parsed from the output of the tool "iwinfo" 
(OpenWrt) or "iw" (most systems). + + +=head1 CONFIGURATION + +Symlink this plugin with the name of the wifi device added (e.g. "phy0"). + +Root permissions are probably required for accessing "iw". + + [wireless_channel_active*] + user root + + +=head1 VERSION + + 1.1 + + +=head1 AUTHOR + +Lars Kruse + + +=head1 LICENSE + +GPLv3 or above + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=cut + + +set -eu + +if which iwinfo >/dev/null; then + # "iwinfo" has a stable output format but is only available on OpenWrt + get_physical_interfaces() { iwinfo | sed -n 's/^.*PHY name: \+\(.*\)$/\1/p'; } + get_physical_interface_current_channel() { iwinfo "$1" info \ + | grep Channel | sed 's/^.*Channel: \+\([0-9]\+\) .*$/\1/'; } + get_physical_interfaces_channel_descriptions() { + # return: CHANNEL CHANNEL_DESCRIPTION + # e.g.: "104 5.520 GHz (Channel 104)" + iwinfo "$1" freqlist | sed 's/^[^0-9]*//' \ + | sed 's/^.*Channel \+\([0-9]\+\).*$/\1 \0/'; } +else + # "iw" is available everywhere - but its output format is not recommended for non-humans + get_physical_interfaces() { iw list | awk '/^\w/ {print $2}'; } + get_physical_interface_current_channel() { iw dev | awk ' + /^\w+#/ { phy_name=gensub("#", "", 1, $1); } + /channel/ { if (phy_name == "phy0") print($2); }'; } + get_physical_interfaces_channel_descriptions() { + # example input: "* 5680 MHz [136]" + # return: CHANNEL CHANNEL_DESCRIPTION + # e.g.: "136 5680 MHz [136]" + iw phy phy0 channels | awk ' + /\*/ { chan_num=gensub("^.*\\[", "", 1, gensub("\\].*$", "", 1, $0)); + print(chan_num, $2, $3, $4); }'; } +fi + + +get_selected_physical_interface() { + # The physical interface name should be safe (phyX) and needs no cleanup. + # pick the last segment after the final "_" + echo "$0" | sed 's/.*_//' +} + + +do_config() { + local phy + phy=$(get_selected_physical_interface) + [ -z "$phy" ] && echo >&2 "Missing wireless PHY" && return 1 + echo "graph_title Wireless channel usage - $phy" + echo "graph_args --base 1000 -r --lower-limit 0 --upper-limit 100" + echo "graph_vlabel Channel usage by time in percent" + echo "graph_category wireless" + echo "graph_info This graph shows the currently used channel of your WiFi device(s)" + get_physical_interfaces_channel_descriptions "$phy" | while read -r channel description; do + fieldname="channel_${channel}" + echo "${fieldname}.label $description" + echo "${fieldname}.draw AREASTACK" + # percent scaling + echo "${fieldname}.cdef 100,${fieldname},*" + done +} + + +do_fetch() { + local phy + local current_channel + phy=$(get_selected_physical_interface) + [ -z "$phy" ] && echo >&2 "Missing wireless PHY" && return 1 + current_channel=$(get_physical_interface_current_channel "$phy") + get_physical_interfaces_channel_descriptions "$phy" | while read -r channel description; do + [ "$current_channel" = "$channel" ] && value=1 || value=0 + echo "channel_${channel}.value $value" + done +} + + +ACTION="${1:-}" + +case "$ACTION" in + config) + do_config || exit 1 + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = 1 ]; then do_fetch; fi + ;; + autoconf) + if [ -z "$(get_physical_interfaces)" ]; then + echo "no (no wifi interfaces found)" + else + echo "yes" + fi + ;; + suggest) + get_physical_interfaces + ;; + "") + do_fetch + ;; + *) + echo >&2 "Invalid action (valid: config / autoconf / suggest / )" + echo >&2 + exit 2 + ;; +esac diff --git a/plugins/network/wireless_channel_occupation_ b/plugins/wifi/wireless_channel_occupation_ similarity index 59% rename from 
plugins/network/wireless_channel_occupation_ rename to plugins/wifi/wireless_channel_occupation_ index 71c54c3e..20999b1c 100755 --- a/plugins/network/wireless_channel_occupation_ +++ b/plugins/wifi/wireless_channel_occupation_ @@ -1,28 +1,50 @@ #!/bin/sh -# -# Monitor the wifi channel occupation (taken from "iw dev wlan0 survey dump"). -# -# Symlink this plugin with the name of the wifi interface added (e.g. "wlan0"). -# -# -# Copyright (C) 2015 Lars Kruse -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# Magic markers -#%# capabilities=autoconf suggest -#%# family=auto + +: << =cut + +=head1 NAME + +wireless_channel_occupation_ - Monitor occupation of wireless channels + + +=head1 APPLICABLE SYSTEMS + +All systems with at least one wireless interface and the the tool "iw". + +The wifi channel occupation is parsed from the output of "iw dev wlan0 survey dump". + + +=head1 CONFIGURATION + +Symlink this plugin with the name of the wifi interface added (e.g. "wlan0"). + +Root permissions are probably required for accessing "iw". + + [wireless_channel_occupation_*] + user root + + +=head1 VERSION + + 1.1 + + +=head1 AUTHOR + +Lars Kruse + + +=head1 LICENSE + +GPLv3 or above + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=cut set -eu @@ -35,36 +57,26 @@ clean_fieldname() { } -get_wifi_devices() { +get_wifi_interfaces() { iw dev | grep Interface | awk '{print $2}' } - get_wifi_device_from_suffix() { local suffix - local called - called=$(basename "$0") - suffix="${called#$SCRIPT_PREFIX}" - # empty result if the prefix did not match (and was not removed) - [ "$suffix" = "$0" ] && echo "" || echo "$suffix" + local real_dev + # pick the part after the basename of the real file + suffix=$(basename "$0" | sed "s/^$SCRIPT_PREFIX//") + for real_dev in $(get_wifi_interfaces); do + [ "$suffix" != "$(clean_fieldname "$real_dev")" ] || echo "$real_dev" + done | head -1 } -if [ "${1:-}" = "autoconf" ]; then - if which iw 2>/dev/null; then - if [ -n "$(get_wifi_devices)" ]; then - echo "yes" - else - echo "no (missing wifi devices)" - fi - else - echo "no (missing 'iw' dependency)" - fi -elif [ "${1:-}" = "suggest" ]; then - get_wifi_devices -elif [ "${1:-}" = "config" ]; then +do_config() { + local device + local dev_field device=$(get_wifi_device_from_suffix) - [ -z "$device" ] && echo >&2 "Invalid wifi device name given" && exit 1 + [ -z "$device" ] && echo >&2 "Invalid wifi device name given" && return 1 echo "graph_title Channel Occupation of $device" echo "graph_vlabel %" echo "graph_category wireless" @@ -93,13 +105,39 @@ elif [ "${1:-}" = "config" ]; then echo "${dev_field}_transmit.type DERIVE" echo "${dev_field}_transmit.draw STACK" echo "${dev_field}_transmit.cdef 100,${dev_field}_transmit,${dev_field}_active,/,*" -else +} + + +do_fetch() { + local device device=$(get_wifi_device_from_suffix) - [ -z "$device" ] && echo >&2 "Invalid wifi device name given" && exit 1 + [ -z "$device" ] && echo >&2 
"Invalid wifi device name given" && return 1 iw dev "$device" survey dump \ | grep -F -A 5 "[in use]" \ | grep -E "channel (busy|receive|transmit|active) time:" \ - | awk '{print "'${device}_'"$2"'.value'",$4}' + | awk '{print "'"${device}_"'"$2"'.value'",$4}' +} + + +if [ "${1:-}" = "autoconf" ]; then + if which iw >/dev/null; then + if [ -n "$(get_wifi_interfaces)" ]; then + echo "yes" + else + echo "no (missing wifi devices)" + fi + else + echo "no (missing 'iw' dependency)" + fi +elif [ "${1:-}" = "suggest" ]; then + for dev in $(get_wifi_interfaces); do + clean_fieldname "$dev" + done +elif [ "${1:-}" = "config" ]; then + do_config || exit 1 + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then do_fetch; fi +else + do_fetch fi exit 0 diff --git a/plugins/wifi/wireless_signal_noise_ b/plugins/wifi/wireless_signal_noise_ new file mode 100755 index 00000000..170b20ce --- /dev/null +++ b/plugins/wifi/wireless_signal_noise_ @@ -0,0 +1,172 @@ +#!/bin/sh + +: << =cut + +=head1 NAME + +wireless_signal_noise_ - Show signal strength and noise for all connected peers of wifi interface + +=head1 APPLICABLE SYSTEMS + +This plugin is suitable for wifi interfaces with a stable selection of peers (e.g. infrastructure). +It is probably not useful for hotspot-like scenarios. + +Information is parsed from the output of the tool "iwinfo" (OpenWrt) or "iw" (most systems, +incomplete information). + + +=head1 CONFIGURATION + +Symlink this plugin with the name of the wifi interface added (e.g. "wlan0"). + +Root permissions are probably required for accessing "iw". + + [wireless_signal_noise_*] + user root + + +=head1 VERSION + + 1.1 + + +=head1 AUTHOR + +Lars Kruse + + +=head1 LICENSE + +GPLv3 or above + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=cut + + +set -eu + + +SCRIPT_PREFIX="wireless_signal_noise_" + + +# prefer "iwinfo" for information retrieval, if it is available +if which iwinfo >/dev/null; then + # "iwinfo" has a stable output format but is only available on openwrt + get_wifi_interfaces() { iwinfo | grep "^[a-zA-Z]" | awk '{print $1}'; } + # return MAC of peer and the signal strength + get_wifi_peers() { iwinfo "$1" assoclist | grep "^[0-9a-fA-F]" | awk '{print $1,$2}'; } + # the noise should be the same for all peers + get_wifi_noise() { iwinfo "$1" info | sed -n 's/^.* Noise: \([0-9-]\+\).*/\1/p'; } +else + # "iw" is available everywhere - but its output format is not recommended for non-humans + get_wifi_interfaces() { iw dev | awk '{ if ($1 == "Interface") print $2; }'; } + get_wifi_peers() { iw dev wlan0 station dump \ + | awk '{ if ($1 == "Station") mac=$2; if (($1 == "signal") && ($2 == "avg:")) print mac,$3}'; } + # TODO: there seems to be no way to retrieve the noise level via "iw" + get_wifi_noise() { echo; } +fi +if which arp >/dev/null; then + # openwrt does not provide 'arp' by default + get_arp() { arp -n; } +else + get_arp() { cat /proc/net/arp; } +fi + + +clean_fieldname() { + echo "$1" | sed 's/^\([^A-Za-z_]\)/_\1/; s/[^A-Za-z0-9_]/_/g' +} + + +get_ip_for_mac() { + local ip + ip=$(get_arp | grep -iw "$1$" | awk '{print $1}' | sort | head -1) + [ -n "$ip" ] && echo "$ip" && return 0 + # no IP found - return MAC instead + echo "$1" +} + + +get_wifi_device_from_suffix() { + local suffix + local real_dev + # pick the part after the basename of the real file + suffix=$(basename "$0" | sed "s/^$SCRIPT_PREFIX//") + for real_dev in $(get_wifi_interfaces); do + [ "$suffix" != "$(clean_fieldname "$real_dev")" ] || echo "$real_dev" + done | head -1 
+} + + +do_config() { + local wifi + wifi=$(get_wifi_device_from_suffix) + [ -z "$wifi" ] && echo >&2 "Missing wifi: $wifi" && return 1 + echo "graph_title Wireless signal quality - $wifi" + echo "graph_args --upper-limit 0" + echo "graph_vlabel Signal and noise [dBm]" + echo "graph_category wireless" + echo "graph_info This graph shows the signal and noise for all wifi peers" + echo "noise.label Noise floor" + echo "noise.draw LINE" + # sub graphs for all peers + get_wifi_peers "$wifi" | while read -r mac signal; do + fieldname=$(clean_fieldname "peer_${mac}") + peer=$(get_ip_for_mac "$mac") + echo "signal_${fieldname}.label $peer" + echo "signal_${fieldname}.draw LINE" + done +} + + +do_fetch() { + local wifi + local peer_data + local noise + wifi=$(get_wifi_device_from_suffix) + [ -z "$wifi" ] && echo >&2 "Missing wifi: $wifi" && return 1 + peer_data=$(get_wifi_peers "$wifi") + echo "$peer_data" | while read -r mac signal; do + # ignore empty datasets + [ -z "$signal" ] && continue + fieldname=$(clean_fieldname "peer_${mac}") + echo "signal_${fieldname}.value $signal" + done + noise=$(get_wifi_noise "$wifi") + echo "noise.value ${noise:-U}" +} + + +ACTION="${1:-}" + +case "$ACTION" in + config) + do_config || exit 1 + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then do_fetch; fi + ;; + autoconf) + if [ -z "$(get_wifi_interfaces)" ]; then + echo "no (no wifi interfaces found)" + else + echo "yes" + fi + ;; + suggest) + get_wifi_interfaces | while read -r ifname; do + clean_fieldname "$ifname" + done + ;; + "") + do_fetch + ;; + *) + echo >&2 "Invalid action (valid: config / suggest / autoconf / )" + echo >&2 + exit 2 + ;; +esac diff --git a/plugins/wifi/wireless_signal_ranges_ b/plugins/wifi/wireless_signal_ranges_ new file mode 100755 index 00000000..0678fd05 --- /dev/null +++ b/plugins/wifi/wireless_signal_ranges_ @@ -0,0 +1,160 @@ +#!/bin/sh + +: << =cut + +=head1 NAME + +wireless_signal_ranges_ - Group and count all connected wifi peers by signal strength ranges + + +=head1 APPLICABLE SYSTEMS + +Information is parsed from the output of the tool "iwinfo" (OpenWrt) or "iw" (most systems). + +This plugin is suitable for wifi interfaces with a variable selection of peers (e.g. mobile +clients). + + +=head1 CONFIGURATION + +Symlink this plugin with the name of the wifi interface added (e.g. "wlan0"). + +Root permissions are probably required for accessing "iw". 
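The bucketing that wireless_signal_ranges_ performs further down (a cumulative peer count per threshold, minus the count for the previous threshold) can be tried out in isolation. In the sketch below the per-peer signal readings are invented and the threshold list mirrors the SIGNAL_THRESHOLDS default:

    #!/bin/sh
    # Sketch only: group made-up per-peer signal readings (dBm) into the
    # ascending SIGNAL_THRESHOLDS ranges, the same way do_fetch() does below.
    SIGNAL_THRESHOLDS="-88 -80 -60 0"
    peer_data=$(printf '%s\n' "-91" "-85" "-72" "-55")

    previous_count=0
    for level in $SIGNAL_THRESHOLDS; do
        # cumulative: how many peers are at or below this threshold ...
        current_count=$(echo "$peer_data" | awk '
            BEGIN { count=0; }
            { if (($1 != "") && ($1 <= '"$level"')) count++; }
            END { print count; }')
        # ... minus the previous cumulative count = peers in this range only
        echo "range_${level#-}.value $((current_count - previous_count))"
        previous_count="$current_count"
    done
    # -> range_88.value 1, range_80.value 1, range_60.value 1, range_0.value 1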
+ + [wireless_signal_ranges_*] + user root + + +=head1 VERSION + + 1.1 + + +=head1 AUTHOR + +Lars Kruse + + +=head1 LICENSE + +GPLv3 or above + + +=head1 MAGIC MARKERS + + #%# family=auto + #%# capabilities=autoconf suggest + +=cut + +set -eu + + +SCRIPT_PREFIX="wireless_signal_ranges_" + +# thresholds for signal quality ranges: ascending values +SIGNAL_THRESHOLDS="-88 -80 -60 0" + + +# prefer "iwinfo" for information retrieval, if it is available +if which iwinfo >/dev/null; then + # "iwinfo" has a stable output format but is only available on openwrt + get_wifi_interfaces() { iwinfo | grep "^[a-zA-Z]" | awk '{print $1}'; } + # return MAC of peer and the signal strength + get_wifi_peers() { iwinfo "$1" assoclist | grep "^[0-9a-fA-F]" | awk '{print $2}'; } +else + # "iw" is available everywhere - but its output format is not recommended for non-humans + get_wifi_interfaces() { iw dev | awk '{ if ($1 == "Interface") print $2; }'; } + get_wifi_peers() { iw dev wlan0 station dump \ + | awk '{ if (($1 == "signal") && ($2 == "avg:")) print $3}'; } +fi + + +clean_fieldname() { + echo "$1" | sed 's/^\([^A-Za-z_]\)/_\1/; s/[^A-Za-z0-9_]/_/g' +} + + +get_level_fieldname() { + echo "range_${1#-}" +} + + +get_wifi_device_from_suffix() { + local suffix + local real_dev + # pick the part after the basename of the real file + suffix=$(basename "$0" | sed "s/^$SCRIPT_PREFIX//") + for real_dev in $(get_wifi_interfaces); do + [ "$suffix" != "$(clean_fieldname "$real_dev")" ] || echo "$real_dev" + done | head -1 +} + + +do_config() { + local wifi + local lower + wifi=$(get_wifi_device_from_suffix) + [ -z "$wifi" ] && echo >&2 "Missing wifi: $wifi" && return 1 + echo "graph_title Wireless signal quality ranges - $wifi" + echo "graph_args --upper-limit 0" + echo "graph_vlabel Signal ranges" + echo "graph_category wireless" + echo "graph_info This graph shows numbers of peers with defined wifi signal ranges" + lower="noise" + for level in $SIGNAL_THRESHOLDS; do + fieldname=$(get_level_fieldname "$level") + echo "${fieldname}.label $lower...$level" + echo "${fieldname}.draw AREASTACK" + lower="$level" + done +} + + +do_fetch() { + local wifi + local peer_data + local previous_count + local current_count + local fieldname + wifi=$(get_wifi_device_from_suffix) + [ -z "$wifi" ] && echo >&2 "Missing wifi: $wifi" && return 1 + peer_data=$(get_wifi_peers "$wifi") + previous_count=0 + for level in $SIGNAL_THRESHOLDS; do + current_count=$(echo "$peer_data" | awk ' + BEGIN { count=0; } + { if (($1 != "") && ($1 <= '"$level"')) count++; } + END { print count; }') + fieldname=$(get_level_fieldname "$level") + echo "${fieldname}.value $((current_count - previous_count))" + previous_count="$current_count" + done +} + + +ACTION="${1:-}" + +case "$ACTION" in + config) + do_config || exit 1 + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then do_fetch; fi + ;; + autoconf) + [ -z "$(get_wifi_interfaces)" ] && echo "no (no wifi interfaces found)" && exit 1 + echo "yes" + ;; + suggest) + get_wifi_interfaces | while read -r ifname; do + clean_fieldname "$ifname" + done + ;; + "") + do_fetch + ;; + *) + echo >&2 "Invalid action (valid: config / autoconf / suggest / )" + echo >&2 + exit 2 + ;; +esac diff --git a/plugins/wordpress/wordpress b/plugins/wordpress/wordpress index f2f08a27..3f2f5976 100755 --- a/plugins/wordpress/wordpress +++ b/plugins/wordpress/wordpress @@ -1,6 +1,6 @@ #!/bin/sh # wordpress-munin plugin -# +# # Author Andre Darafarin # Improvements by Chris Bair # Version 0.2 15 Feb 2011 @@ -38,7 +38,7 @@ Add file 
plugin-conf.d/wordpress and fill like this # Replace your_db_pass =item * env.DB_HOST host_of_your_db -# Replace with host of database server. Will be localhost for many users. +# Replace with host of database server. Will be localhost for many users. =back diff --git a/plugins/wordpress/wordpress-mu-or-network b/plugins/wordpress/wordpress-mu-or-network index a23074fe..dfc9f6a4 100755 --- a/plugins/wordpress/wordpress-mu-or-network +++ b/plugins/wordpress/wordpress-mu-or-network @@ -1,11 +1,11 @@ #!/bin/bash # wordpress-mu-munin plugin -# +# # Author Andre Darafarin # Improvements by Chris Bair # Modified for Wordpress MU (or Network) by raT rat@espiv.net @ 22-04-2011 # Version 0.2 15 Feb 2011 -# +# # # : <<=cut @@ -40,7 +40,7 @@ Add file plugin-conf.d/wordpress and fill like this # Replace your_db_pass =item * env.DB_HOST host_of_your_db -# Replace with host of database server. Will be localhost for many users. +# Replace with host of database server. Will be localhost for many users. =back diff --git a/plugins/wordpress/wordpress2 b/plugins/wordpress/wordpress2 old mode 100644 new mode 100755 index c075b21a..58fb9113 --- a/plugins/wordpress/wordpress2 +++ b/plugins/wordpress/wordpress2 @@ -8,7 +8,7 @@ error_reporting(E_ALL & ~E_NOTICE & ~E_WARNING); it's a simple plugin to monitor users, comments, pingbacks and your posts from your wordpress homepage. - Simply put your path of wp-config.php in your munin-node + Simply put your path of wp-config.php in your munin-node configuration and this plugin does the rest for you. Happy monitoring! :) @@ -19,7 +19,7 @@ error_reporting(E_ALL & ~E_NOTICE & ~E_WARNING); ---- CONFIGURATION ---- You just need to provide the path to your wp-config.php of your wordpress installation. - + The configuration for munin-node is by default at: /etc/munin/plugin-conf.d/munin-node @@ -109,13 +109,26 @@ $comments = 0; $pingbacks = 0; // GET DATA -mysql_connect($d["host"], $d["user"], $d["pass"]) or die("Error: Failed to connect to the MySQL database!"); -mysql_select_db($d["dbnm"]) or die("Error: Failed to select database!"); +if(function_exists('mysql_connect')) { + mysql_connect($d["host"], $d["user"], $d["pass"]) or die("Error: Failed to connect to the MySQL database!"); + mysql_select_db($d["dbnm"]) or die("Error: Failed to select database!"); -$users = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."users;"), 0); -$posts = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."posts WHERE post_status='publish' AND post_password='' AND post_type='post';"), 0); -$comments = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='';"), 0); -$pingbacks = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='pingback';"), 0); + $users = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."users;"), 0); + $posts = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."posts WHERE post_status='publish' AND post_password='' AND post_type='post';"), 0); + $comments = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='';"), 0); + $pingbacks = mysql_result(mysql_query("SELECT COUNT(*) FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='pingback';"), 0); +}else{ + $mysqli = new mysqli($d["host"], $d["user"], $d["pass"], $d["dbnm"]); + if ($mysqli->connect_errno) { + die("Error: Failed to connect to the MySQL database!"); + } + + $users = 
$mysqli->query("SELECT COUNT(*) AS cnt FROM ".$d["tbpf"]."users;")->fetch_object()->cnt; + $posts = $mysqli->query("SELECT COUNT(*) AS cnt FROM ".$d["tbpf"]."posts WHERE post_status='publish' AND post_password='' AND post_type='post';")->fetch_object()->cnt; + $comments = $mysqli->query("SELECT COUNT(*) AS cnt FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='';")->fetch_object()->cnt; + $pingbacks = $mysqli->query("SELECT COUNT(*) AS cnt FROM ".$d["tbpf"]."comments WHERE comment_approved='1' AND comment_type='pingback';")->fetch_object()->cnt; + $mysqli->close(); +} // OUTPUT DATA echo "users.value ".$users ."\n"; diff --git a/plugins/wowza/wowza-media-server b/plugins/wowza/wowza-media-server index 2cbb73cf..fd04aa5e 100755 --- a/plugins/wowza/wowza-media-server +++ b/plugins/wowza/wowza-media-server @@ -141,7 +141,7 @@ try: print ("graph_vlabel minutes") print ("avg.label average listening duration") print ("mdn.label median listening duration") - + elif plugin_name == "wowza_vhost_listeners": print ("graph_title Wowza listeners count by vhosts") print ("graph_args --base 1000 -l 0") @@ -157,7 +157,7 @@ try: is_first = False else: print (vname,".draw STACK",sep='') - + elif plugin_name == "wowza_vhost_duration": print ("graph_title Wowza clients listening duration by vhosts") print ("graph_args --base 1000 -l 0") @@ -168,7 +168,7 @@ try: vname = vh["Name"].strip("/").replace(".","_").replace("-","_") print (vname,"_avg.label average listening duration for ",vh["Name"],sep='') print (vname,"_mdn.label median listening duration for ",vh["Name"],sep='') - + elif plugin_name == "wowza_vhost_uptime": print ("graph_title Wowza vhosts uptime") print ("graph_args --base 1000 -l 0") @@ -178,7 +178,7 @@ try: for vh in vhosts: vname = vh["Name"].strip("/").replace(".","_").replace("-","_") print (vname,".label vhost: ",vh["Name"],sep='') - + elif plugin_name == "wowza_app_listeners": print ("graph_title Wowza listeners count by apps") print ("graph_args --base 1000 -l 0") @@ -196,7 +196,7 @@ try: is_first = False else: print (vname,"_",aname,".draw STACK",sep='') - + elif plugin_name == "wowza_app_duration": print ("graph_title Wowza clients listening duration by apps") print ("graph_args --base 1000 -l 0") @@ -209,7 +209,7 @@ try: aname = app["Name"].strip("/").replace(".","_").replace("-","_") print (vname,"_",aname,"_avg.label average listening duration for ",vh["Name"],".",app["Name"],sep='') print (vname,"_",aname,"_mdn.label median listening duration for ",vh["Name"],".",app["Name"],sep='') - + elif plugin_name == "wowza_app_uptime": print ("graph_title Wowza apps uptime") print ("graph_args --base 1000 -l 0") @@ -221,7 +221,7 @@ try: for app in vh["Applications"]: aname = app["Name"].strip("/").replace(".","_").replace("-","_") print (vname,"_",aname,".label vhost.app: ",vh["Name"],".",app["Name"],sep='') - + else: # wowza_listeners print ("graph_title Wowza listeners count") print ("graph_args --base 1000 -l 0") diff --git a/plugins/wuala/wuala_stats b/plugins/wuala/wuala_stats index bab9827c..57ec557b 100755 --- a/plugins/wuala/wuala_stats +++ b/plugins/wuala/wuala_stats @@ -1,5 +1,5 @@ #!/usr/bin/perl -w -# +# # Plugin to monior the Wua.la Linux client # for shared, social storage from http://wua.la. # @@ -10,14 +10,14 @@ # # Set env.wualadir , e.g. /home/fred/wuala # and user fred. 
Please note, that if you don't run this -# plugin as the correct user it won't work, since the +# plugin as the correct user it won't work, since the # wualacmd command will only work when running as the correct user. # # The Plugin tries to detect if wuala is running and will # report unknown (U) as values if it is not running. # # This is a dual-personality plugin. If the file- or -# softlink-name of this plugin ends in _storage it will report +# softlink-name of this plugin ends in _storage it will report # the storage values and if it ends in _uptime it will report the # uptime. # @@ -33,11 +33,8 @@ # my $wualadir = $ENV{wualadir}; -if ($ARGV[0] and $ARGV[0] eq "autoconf") -{ - print "no\n"; - exit 1; -} elsif ($ARGV[0] and $ARGV[0] eq "config") + +if ($ARGV[0] and $ARGV[0] eq "config") { if($0 =~ /.*_uptime/) { print "graph_args -l0 --vertical-label %\n"; diff --git a/plugins/xastir/xastir b/plugins/xastir/xastir index 5894b181..bd575173 100755 --- a/plugins/xastir/xastir +++ b/plugins/xastir/xastir @@ -1,6 +1,6 @@ #!/bin/bash -## Copyright (C) 2012 Robert Kilian +## Copyright (C) 2012 Robert Kilian ## ## This file is part of the Xastir plugin for Munin. ## @@ -106,7 +106,7 @@ NETTX=`cat $LOGDIR/net.log | grep -v '^\#' | grep ^$STATION_CALL | wc -l` TNC=`cat $LOGDIR/tnc.log | grep -v '^\#' | grep -v ^$STATION_CALL | wc -l` TNCTX=`cat $LOGDIR/tnc.log | grep -v '^\#' | grep ^$STATION_CALL | wc -l` -# Display values +# Display values echo "igatetonet.value $IGATETONET" echo "message.value $MESSAGE" echo "messagetx.value $MESSAGETX" diff --git a/plugins/xbnbt/xbnbt_peers b/plugins/xbnbt/xbnbt_peers index 34d559ac..346cb2ec 100755 --- a/plugins/xbnbt/xbnbt_peers +++ b/plugins/xbnbt/xbnbt_peers @@ -75,14 +75,12 @@ if ($response->content =~ /xpeerstats.*?odd">(\d*).*?even">(\d*).*?odd">(\d*).*? if ( defined $ARGV[0] and $ARGV[0] eq "autoconf") { if (! $response->is_success) { print 'no (Error: ', $response->header('WWW-Authenticate') || ' Error accessing', $response->status_line, ' at http://'.$host.':'.$port.'/xstats.html Aborting)'; - exit 1; } elsif (! defined $peers ) { print "no (Unable to find peer values within the page from the given URL.)"; - exit 1; } else { print "yes"; - exit 0; } + exit 0; } diff --git a/plugins/xen/xen b/plugins/xen/xen index b44f2692..cad9b7a9 100755 --- a/plugins/xen/xen +++ b/plugins/xen/xen @@ -20,45 +20,43 @@ statefile="$MUNIN_PLUGSTATE/munin-plugin-xen.state" if [ "$1" = "autoconf" ]; then if which xm > /dev/null ; then echo yes - exit 0 + else + echo "no (xm not found)" fi - echo "no (xm not found)" - exit 1 + exit 0 fi if [ "$1" = "config" ]; then - if [ ! -e $statefile ]; then - touch $statefile - fi + if [ ! 
-e "$statefile" ]; then + touch "$statefile" + fi echo 'graph_title Xen Domain Utilisation' echo 'graph_args --base 1000 -l 0 --upper-limit 100 --rigid' echo 'graph_scale no' echo 'graph_vlabel %' - echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'graph_info This graph shows how many percent of the CPU time was used by each domain' - xm list | grep -v "^Name .* Time(s)$" | \ - while read name domid mem cpu state time console; do - name=`echo $name | sed -e"s/[-.]/_/g"` - TEST=`less $statefile | grep "^${name}$" | wc -l` - if [ $TEST -ne 1 ]; then - echo "$name" >> $statefile - fi - done + xm list | grep -v "^Name .* Time(s)$" | while read -r name domid mem cpu state time console; do + name=$(echo "$name" | sed -e"s/[-.]/_/g") + TEST=$(grep "^${name}$" "$statefile" | wc -l) + if [ "$TEST" -ne 1 ]; then + echo "$name" >> "$statefile" + fi + done - FIRST=1 - cat $statefile | sort | \ - while read name; do + FIRST=1 + sort < "$statefile" | while read -r name; do echo "$name.label $name" echo "$name.type COUNTER" - if [ $FIRST -eq 1 ]; then - echo "$name.draw AREA" - FIRST=0 - else - echo "$name.draw STACK" - fi + if [ "$FIRST" -eq 1 ]; then + echo "$name.draw AREA" + FIRST=0 + else + echo "$name.draw STACK" + fi echo "$name.min 0" echo "$name.max 100" echo "$name.info % of the CPU time spent for $name" @@ -66,12 +64,10 @@ if [ "$1" = "config" ]; then exit 0 fi -xm list | grep -v "^Name .* Time(s)$" | \ -while read name domid mem cpu state time console; do - name=`echo $name | sed -e "s/[-.]/_/g"` - # only seconds - time=`echo $time | sed -e "s/\..//"` - # scale 60s/60s => 100%/60s - time=`echo "$time*100/60" | bc` +# shellcheck disable=SC2034 +xm list | grep -v "^Name .* Time(s)$" | while read -r name domid mem cpu state time console; do + name=$(echo "$name" | sed -e "s/[-.]/_/g") + # scale 60s/60s => 100%/60s + time=$(echo "$time" | awk '{print ($1 * 100 / 60) }') echo "$name.value $time" done diff --git a/plugins/xen/xen-cpu b/plugins/xen/xen-cpu index 307c8076..af90a60b 100755 --- a/plugins/xen/xen-cpu +++ b/plugins/xen/xen-cpu @@ -3,7 +3,7 @@ # Script to minitor the cpu usage of Xen domains # # Author: Adam Crews shroom com> -# +# # License: GPL # Based on the original xen script from Matthias Pfafferodt, syntron at web.de # @@ -40,11 +40,10 @@ if (defined($ARGV[0])) { if ( "$arg" eq 'autoconf') { if ( -e $XM && -e $XMTOP ) { print "yes\n"; - exit 0; } else { print "no ($XM and/or $XMTOP not found\n"; - exit 1; } + exit 0; } if ( "$arg" eq 'config') { @@ -53,12 +52,12 @@ if (defined($ARGV[0])) { 'graph_title' => 'Xen Domain CPU Usage', 'graph_args' => '--base 1000 -l 0 --upper-limit 100 --rigid', 'graph_vlabel' => 'Percent (%)', - 'graph_category' => 'Virtualization', + 'graph_category' => 'virtualization', 'graph_info' => 'Display the % of CPU Usage for each domain', - ); + ); my @domains = `$XM list`; - shift(@domains); # we dont need the header line + shift(@domains); # we don't need the header line my $cnt = "0"; foreach my $domain ( @domains ) { my ($dom,undef) = split(/\s/, $domain, 2); @@ -75,7 +74,7 @@ if (defined($ARGV[0])) { if ( "$cnt" == "0") { $cnf{$dom.'.draw'} = 'AREA'; } $cnt++; } - + foreach my $key (sort(keys(%cnf))) { print "$key $cnf{$key}\n"; } @@ -87,7 +86,7 @@ if (defined($ARGV[0])) { my @chunks; undef(@chunks); -{ +{ # run the xentop command a few times because the first reading is not always accurate local $/ = undef; @chunks = split(/^xentop - .*$/m, `$XMTOP -b -i2 -d2`); @@ -97,10 +96,10 @@ my @chunks; undef(@chunks); my @stats = 
split (/\n/,pop(@chunks)); # remove the first 4 items that are junk that we don't need. -shift(@stats); -shift(@stats); -shift(@stats); -shift(@stats); +shift(@stats); +shift(@stats); +shift(@stats); +shift(@stats); my %vals; undef(%vals); diff --git a/plugins/xen/xen-multi b/plugins/xen/xen-multi index 233dfb5c..5de46a98 100755 --- a/plugins/xen/xen-multi +++ b/plugins/xen/xen-multi @@ -31,7 +31,7 @@ This plugin produces four different graphs: CPU usage, memory usage, disk IOs and network traffic. In each graph, all Xen domains (including dom0) have their data stacked, giving -an overall amount of ressources used. +an overall amount of resources used. NOTE: xentop always reports 0 for dom0's disk IOs and network traffic, but both graphs show its entry all the same, so each domain can keep its own color @@ -129,7 +129,12 @@ sub trim_label { # Global variables my (%domains,$domain,@domainlist,$munindomain,$cpusecs,$cpupercent,$memk,$nettxk,$netrxk,$vbdrd,$vbdwr); -open (XENTOP,"xentop -b -f -i2 |") or die "Could not execute xentop, $!"; +# There is a bug in xentop, its output is too high in rare cases. +# https://lists.xenproject.org/archives/html/xen-users/2019-04/msg00020.html +# Workaround to process 3 iterations after the first one (4 iterations) and +# choose the lowest cpusecs value line. + +open (XENTOP,"xentop -b -f -i4 |") or die "Could not execute xentop, $!"; # Now we build a hash of hashes to store information while () { @@ -146,13 +151,21 @@ while () { # We need the remaining data only for a normal run if ($ARGV[0] eq "") { - $domains{$domain}{'cpusecs'} = $cpusecs; - $domains{$domain}{'cpupercent'} = $cpupercent; - $domains{$domain}{'mem'} = $memk; - $domains{$domain}{'nettx'} = $nettxk; - $domains{$domain}{'netrx'} = $netrxk; - $domains{$domain}{'vbdrd'} = $vbdrd; - $domains{$domain}{'vbdwr'} = $vbdwr; + # the cnt key counts the iterations + # - skip the first one because xentop gives 0% cpu for the first iteration ( cnt > 1 ) + # - process if not processed yet ( !defined ) or if current cpusecs less than any earlier + # normally the cpusecs is monotonous per domain as it grows so a smaller value means that the previous one was wrong + + $domains{$domain}{'cnt'}++; + if ( $domains{$domain}{'cnt'} > 1 && ( !defined $domains{$domain}{'cpusecs'} || $cpusecs < $domains{$domain}{'cpusecs'} ) ) { + $domains{$domain}{'cpusecs'} = $cpusecs; + $domains{$domain}{'cpupercent'} = $cpupercent; + $domains{$domain}{'mem'} = $memk; + $domains{$domain}{'nettx'} = $nettxk; + $domains{$domain}{'netrx'} = $netrxk; + $domains{$domain}{'vbdrd'} = $vbdrd; + $domains{$domain}{'vbdwr'} = $vbdwr; + } } } @@ -169,7 +182,7 @@ if ($ARGV[0] eq "config") { print "graph_args --base 1000 -l 0\n"; print "graph_vlabel cpu seconds\n"; print "graph_scale no\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_info This graph shows CPU time for each Xen domain.\n"; for $domain (@domainlist) { print "$domains{$domain}{'munindomain'}_cpu_time.label ".trim_label('pos',$domain)."\n"; @@ -186,7 +199,7 @@ if ($ARGV[0] eq "config") { print "graph_args --base 1000 -l 0 --upper-limit 100\n"; print "graph_vlabel %\n"; print "graph_scale no\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_info This graph shows CPU utilization for each Xen domain.\n"; for $domain (@domainlist) { print "$domains{$domain}{'munindomain'}_cpu.label ".trim_label('pos',$domain)."\n"; @@ -199,7 +212,7 @@ if ($ARGV[0] eq "config") { print 
"graph_title Xen domains memory usage\n"; print "graph_args --base 1024 -l 0\n"; print "graph_vlabel bytes\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_info This graph shows memory usage for each Xen domain.\n"; for $domain (@domainlist) { print "$domains{$domain}{'munindomain'}_mem.label ".trim_label('pos',$domain)."\n"; @@ -213,7 +226,7 @@ if ($ARGV[0] eq "config") { print "graph_title Xen domains network traffic\n"; print "graph_args --base 1000\n"; print "graph_vlabel bits per \${graph_period} in (-) / out (+)\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_info This graph shows network traffic for each Xen domain.\n"; for $domain (@domainlist) { print "$domains{$domain}{'munindomain'}_netrx.label none\n"; @@ -233,7 +246,7 @@ if ($ARGV[0] eq "config") { print "graph_title Xen domains disk IOs\n"; print "graph_args --base 1000\n"; print "graph_vlabel IOs per \${graph_period} read (-) / write (+)\n"; - print "graph_category Virtualization\n"; + print "graph_category virtualization\n"; print "graph_info This graph shows disk IOs for each Xen domain.\n"; for $domain (@domainlist) { print "$domains{$domain}{'munindomain'}_vbdrd.label none\n"; diff --git a/plugins/xen/xen_cpu_v2 b/plugins/xen/xen_cpu_v2 index 75c9a951..16db4b8c 100755 --- a/plugins/xen/xen_cpu_v2 +++ b/plugins/xen/xen_cpu_v2 @@ -7,8 +7,8 @@ # Based loosely on Adam Crews' xen_cpu script # # This script tries to measure the CPU usage of the Xen guests -# accurately. -# The problem with the current monitoring script is that these +# accurately. +# The problem with the current monitoring script is that these # scripts use the CPU output of xentop or xm list, which might be # inaccurate due to the resources used up at the time of the query by # the xm or xentop command. 
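As an illustration of the delta-based measurement described in the header comment above (a minimal sketch only, not the plugin's actual Perl implementation: the state-file path, the 60-second polling interval, and the variable names are assumptions), the same idea can be expressed in shell by comparing two consecutive cumulative CPU-time readings from "xm list":

    #!/bin/sh
    # Sketch: derive per-domain CPU usage from the change in cumulative CPU
    # seconds reported by "xm list", instead of sampling an instantaneous
    # percentage from xentop at query time.
    STATE=/tmp/xen_cpu_sketch.state   # assumed path, adjust as needed
    INTERVAL=60                       # assumed munin-node polling interval

    touch "$STATE"
    xm list | tail -n +2 | while read -r name domid mem cpu state time console; do
        prev=$(awk -v n="$name" '$1 == n { print $2 }' "$STATE")
        [ -z "$prev" ] && prev=$time   # first run: report 0 for this domain
        # percentage of one CPU consumed since the previous sample
        echo "$name.value $(echo "$time $prev $INTERVAL" | awk '{ printf "%.2f", ($1 - $2) * 100 / $3 }')"
    done
    xm list | tail -n +2 | awk '{ print $1, $6 }' > "$STATE"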
@@ -50,19 +50,18 @@ if ( defined($ARGV[0]) ) { $arg = 'autoconf'; } - + if ( $arg eq 'autoconf') { if ( -e $XM && -e $XMTOP ) { print "yes\n"; - exit 0; } else { print "no ($XM and/or $XMTOP not found\n"; - exit 1; } + exit 0; } if ( $arg eq 'config' ) @@ -72,13 +71,13 @@ if ( defined($ARGV[0]) ) 'graph_title' => 'Xen Domain CPU Usage v2', 'graph_args' => '--base 1000 -l 0 --upper-limit 100 --rigid', 'graph_vlabel' => 'Percent (%)', - 'graph_category' => 'Virtualization', + 'graph_category' => 'virtualization', 'graph_info' => 'Display the % of CPU Usage for each domain', ); my @domains = `$XM list`; my $cnt = 0; - shift(@domains); # we dont need the header line + shift(@domains); # we don't need the header line foreach my $domain ( @domains ) { my ($dom,undef) = split(/\s/, $domain, 2); @@ -102,7 +101,7 @@ if ( defined($ARGV[0]) ) exit 0; } } - + my @xmlist = `$XM list`; shift (@xmlist); @@ -124,7 +123,7 @@ if ( -e $TEMPFILE ) $_ =~ /(\S+)\s+\S+\s+\S+\s+\d+\s+\S+\s+(\S+)/; $dom{$1}->{'oldtime'} = $2; } - + close FH; } @@ -137,14 +136,14 @@ foreach my $domain ( @xmlist ) $diff = $dom{$1}->{'newtime'} - $dom{$1}->{'oldtime'}; $diff = sprintf("%.2f", $diff); - + # Calc the diff between old and new cputime, or reset the counter if ( $diff < 0 ) { $diff = $dom{$1}->{'newtime'}; } $dom{$1}->{'diff'} = $diff; - + # Calc a sum CPU usage $cpusum = $cpusum + $diff; } @@ -168,12 +167,16 @@ while (($key, $value) = each %dom) my $tmp = 0; $tmp = ( $dom{$key}->{'diff'} / $cpusum ) * 100; $dom{$key}->{'pc_time'} = sprintf("%.2f", $tmp); - + # Calc a percentage based on the _total_ available CPU time $tmp = 0; - $tmp = ( $dom{$key}->{'diff'} / $tcpuavail ) * 100; - $dom{$key}->{'pc_tcpu'} = sprintf("%.2f", $tmp); - + if ($tcpuavail != 0) { + $tmp = ( $dom{$key}->{'diff'} / $tcpuavail ) * 100; + $dom{$key}->{'pc_tcpu'} = sprintf("%.2f", $tmp); + } else { + $dom{$key}->{'pc_tcpu'} = "U"; + } + if ( $debug ) { print "$key newtime: ".$dom{$key}->{'newtime'}.", oldtime: ".$dom{$key}->{'oldtime'}.", diff: ".$dom{$key}->{'diff'}.", pc_bytime ".$dom{$key}->{'pc_time'}.", pc_bytcpu ".$dom{$key}->{'pc_tcpu'}."\n"; @@ -186,4 +189,4 @@ open(FH, ">", $TEMPFILE) or die $!; print FH $curtime."\n"; print FH @xmlist; close FH; - + diff --git a/plugins/xen/xen_memory b/plugins/xen/xen_memory index ef2532e4..90fbb56b 100755 --- a/plugins/xen/xen_memory +++ b/plugins/xen/xen_memory @@ -15,12 +15,12 @@ XM='/usr/sbin/xm' if [ "$1" = "autoconf" ]; then - if [ -e $XM ]; then + if [ -x "$XM" ]; then echo yes - exit 0 + else + echo "no (xm not found)" fi - echo "no (xm not found)" - exit 1 + exit 0 fi if [ "$1" = "config" ]; then @@ -29,39 +29,32 @@ if [ "$1" = "config" ]; then echo 'graph_args --base 1000 -l 0' echo 'graph_scale no' echo 'graph_vlabel MB' - echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'graph_info This graph shows of many mS wall time where used by a domain' - # xm info | while read name bla value; do echo "$name $value"; done - $XM info | while read name bla value; do - #total_memory 2047 - #free_memory 1476 - name=`echo $name | sed -e"s/-/_/"` + "$XM" info | while read -r name bla value; do + # total_memory 2047 + # free_memory 1476 + name=$(echo "$name" | sed -e"s/-/_/") - if [ "$name" = "total_memory" ]; then + if [ "$name" = "total_memory" ] || [ "$name" = "free_memory" ]; then echo "$name.label $name" echo "$name.type GAUGE" + echo "$name.draw AREASTACK" echo "$name.min 0" - echo "$name.info total memory" - fi - if [ "$name" = "free_memory" ]; then - echo "$name.label 
$name" - echo "$name.type GAUGE" - echo "$name.draw AREA" -# echo "$name.draw STACK" - echo "$name.min 0" - echo "$name.info free memory" - fi + if [ "$name" = "total_memory" ]; then + echo "$name.info total memory" + elif [ "$name" = "free_memory" ]; then + echo "$name.info free memory" + fi + fi done exit 0 fi -$XM info | while read name bla value; do - name=`echo $name | sed -e"s/-/_/"` - if [ "$name" = "total_memory" ]; then - echo "$name.value $value" - fi - if [ "$name" = "free_memory" ]; then +# shellcheck disable=SC2034 +"$XM" info | while read -r name bla value; do + name=$(echo "$name" | sed -e"s/-/_/") + if [ "$name" = "total_memory" ] || [ "$name" = "free_memory" ]; then echo "$name.value $value" fi done - diff --git a/plugins/xen/xen_traffic_ b/plugins/xen/xen_traffic_ index f0b9eb85..386320e9 100755 --- a/plugins/xen/xen_traffic_ +++ b/plugins/xen/xen_traffic_ @@ -7,35 +7,33 @@ #%# capabilities=autoconf suggest -DOMAIN=$( basename $0 | sed 's/^xen_traffic_//g' ) -NAME=$( echo $DOMAIN | sed -e's/-/_/g' ) +DOMAIN=$( basename "$0" | sed 's/^xen_traffic_//g' ) +NAME=$( echo "$DOMAIN" | sed -e's/-/_/g' ) if [ "$1" = "autoconf" ]; then if which xm > /dev/null ; then - echo yes + if [ -r /proc/net/dev ]; then + echo yes + else + echo "no (/proc/net/dev not found)" + fi else echo "no (xm not found)" - exit 1 - fi - if [ -r /proc/net/dev ]; then - echo yes - else - echo "no (/proc/net/dev not found)" - exit 1 fi exit 0 fi if [ "$1" = "suggest" ]; then - xm list | awk '{print $1}' | egrep -v "^(Name|Domain-0)" + xm list | awk '{print $1}' | grep -v -E "^(Name|Domain-0)" exit 0 fi if [ "$1" = "config" ]; then echo "graph_title Xen Traffic for $NAME" + # shellcheck disable=SC2016 echo 'graph_vlabel bits in (-) / out (+) per ${graph_period}' echo 'graph_args --base 1024 -l 0' - echo 'graph_category Virtualization' + echo 'graph_category virtualization' echo 'out.label sent' echo 'out.type DERIVE' echo 'out.min 0' @@ -47,13 +45,12 @@ if [ "$1" = "config" ]; then exit 0 fi -dev=$( xm network-list $DOMAIN | egrep "^[0-9]+" | sed 's@^.*vif/\([0-9]*\)/\([0-9]*\).*$@vif\1.\2@') +dev=$( xm network-list "$DOMAIN" | grep '^[0-9]\+' | sed 's@^.*vif/\([0-9]*\)/\([0-9]*\).*$@vif\1.\2@') awk -v interface="$dev" \ - 'BEGIN { gsub(/\./, "\\.", interface) } \ + 'BEGIN { gsub(/\./, "\\.", interface) } $1 ~ "^" interface ":" { - split($0, a, /: */); $0 = a[2]; \ - print "in.value " $1 "\nout.value " $9 \ + split($0, a, /: */); $0 = a[2]; + print "in.value " $1 "\nout.value " $9; }' \ /proc/net/dev - diff --git a/plugins/xen/xen_traffic_all b/plugins/xen/xen_traffic_all index b1368739..b22a594f 100755 --- a/plugins/xen/xen_traffic_all +++ b/plugins/xen/xen_traffic_all @@ -11,60 +11,56 @@ if [ "$1" = "autoconf" ]; then if which xm > /dev/null ; then - echo yes + if [ -r /proc/net/dev ]; then + echo yes + else + echo "no (/proc/net/dev not found)" + fi else echo "no (xm not found)" - exit 1 - fi - if [ -r /proc/net/dev ]; then - echo yes - else - echo "no (/proc/net/dev not found)" - exit 1 fi exit 0 fi if [ "$1" = "config" ]; then echo 'graph_title Xen Traffic' + # shellcheck disable=SC2016 echo 'graph_vlabel bits received (-) / sent (+) per ${graph_period}' echo 'graph_args --base 1024 -l 0' echo 'graph_category Virtualization' - DOMAINS=$(xm list | awk '{print $1}' | egrep -v "^(Name|Domain-0)") + DOMAINS=$(xm list | awk '{print $1}' | grep -v -E "^(Name|Domain-0)") for dom in $DOMAINS; do - name=$( echo $dom | tr .- __ ) + name=$( echo "$dom" | tr .- __ ) #echo $name"UP.label $name" #echo $name"DOWN.label 
$name" - - echo $name'Down.label received' - echo $name'Down.type COUNTER' - echo $name'Down.graph no' + + echo "${name}Down.label received" + echo "${name}Down.type COUNTER" + echo "${name}Down.graph no" echo "${name}Down.cdef ${name}Down,8,*" echo "${name}Up.label ${name}" - echo $name'Up.type COUNTER' + echo "${name}Up.type COUNTER" echo "${name}Up.negative ${name}Down" echo "${name}Up.cdef ${name}Up,8,*" done exit 0 fi -DOMAINS=$(xm list | awk '{print $1}' | egrep -v "^(Name|Domain-0)") +DOMAINS=$(xm list | awk '{print $1}' | grep -v -E "^(Name|Domain-0)") for dom in $DOMAINS; do - dev=$( xm list $dom --long | awk '/vifname / { print $2 }' | sed 's/)//' ) - if [ "$dev" = "" ]; then - dev=$( xm network-list $dom |\ - egrep "^[0-9]+" | sed 's@^.*vif/\([0-9]*\)/\([0-9]*\).*$@vif\1.\2@') - fi - name=$( echo $dom | tr .- __ ) + dev=$( xm list "$dom" --long | awk '/vifname / { print $2 }' | sed 's/)//' ) + if [ -z "$dev" ]; then + dev=$( xm network-list "$dom" | grep '^[0-9]\+' | sed 's@^.*vif/\([0-9]*\)/\([0-9]*\).*$@vif\1.\2@') + fi + name=$( echo "$dom" | tr .- __ ) #awk -v name="$name" -v interface="$dev" -F'[: \t]+' \ #'{ sub(/^ */,""); if ($1 == interface) \ #print name"DOWN.value "$2"\n"name"UP.value "$10; }' /proc/net/dev awk -v name="$name" -v interface="$dev" \ - 'BEGIN { gsub(/\./, "\\.", interface) } \ + 'BEGIN { gsub(/\./, "\\.", interface) } $1 ~ "^" interface ":" { - split($0, a, /: */); $0 = a[2]; \ - print name"Down.value " $1 "\n"name"Up.value " $9 \ + split($0, a, /: */); $0 = a[2]; + print name"Down.value " $1 "\n"name"Up.value " $9; }' \ /proc/net/dev done - diff --git a/plugins/xen/xen_vbd b/plugins/xen/xen_vbd index da34765a..55e8c701 100755 --- a/plugins/xen/xen_vbd +++ b/plugins/xen/xen_vbd @@ -16,13 +16,13 @@ $XM = '/usr/sbin/xm'; $XMTOP = '/usr/sbin/xentop'; -# ah, parameters coming in +# ah, parameters coming in if ( defined($ARGV[0])) { if ($ARGV[0] eq 'config') { $arg = 'config'; } if ($ARGV[0] eq 'autoconf') { $arg = 'autoconf'; } - - if ( $arg eq 'autoconf' ) + + if ( $arg eq 'autoconf' ) { if ( -e $XM && -e $XMTOP ) { @@ -35,48 +35,48 @@ if ( defined($ARGV[0])) exit 0; } } - + if ( $arg eq 'config' ) { %cnf = ( 'graph_title' => 'Xen Domain I/O usage', 'graph_args' => '--base 1024 -l 0', 'graph_vlabel' => 'read (-), write (+)', - 'graph_category' => 'Virtualization', + 'graph_category' => 'virtualization', 'graph_info' => 'Display the I/O operations for each domain', ); - + @domains = `$XM list`; shift(@domains); # we don't need the header line - + foreach $domain ( @domains ) { ($dom, undef) = split(/\s/, $domain); $dom =~ s/[-.]/_/g; - + $cnf{ $dom.'RD' . '.label' } = 'read'; $cnf{ $dom.'RD' . '.type' } = 'COUNTER'; $cnf{ $dom.'RD' . '.graph' } = 'no'; $cnf{ $dom.'RD' . '.cdef' } = $dom.'RD,8,*'; - + $cnf{ $dom.'WR' . '.label' } = $dom; $cnf{ $dom.'WR' . '.type' } = 'COUNTER'; $cnf{ $dom.'WR' . '.negative' } = $dom.'RD'; $cnf{ $dom.'WR' . '.cdef' } = $dom.'WR,8,*'; - + if ( "$cnt" == "0" ) { $cnf { "$dom" . '.draw' } = 'AREA'; } $cnt++; } - + foreach $key ( sort(keys(%cnf)) ) { print "$key $cnf{$key}\n"; } exit 0; - + } } @@ -98,12 +98,12 @@ foreach $domain (@stats) { $domain =~ s/^\s+//; @tmp = split(/\s+/, $domain); - + $domname = $tmp[0]; $domname =~ s/[-.]/_/g; $vbdrd = $tmp[14]; $vbdwr = $tmp[15]; - + $vals{$domname."RD"}{'value'} = $vbdrd; $vals{$domname."WR"}{'value'} = $vbdwr; } @@ -112,4 +112,4 @@ foreach $key ( sort(keys(%vals)) ) { print "$key.value " . ($vals{$key}{'value'}) . 
"\n"; } - + diff --git a/plugins/yacy/yacy b/plugins/yacy/yacy index 19589f4e..8c6348c3 100755 --- a/plugins/yacy/yacy +++ b/plugins/yacy/yacy @@ -29,7 +29,7 @@ yacy - Munin plugin to monitor YaCy distributed search engine network. =head1 APPLICABLE SYSTEMS -YaCy +YaCy =head1 CONFIGURATION @@ -59,14 +59,14 @@ GPLv2 =cut BEGIN { - if(!eval "require XML::Smart;") { + if(!eval "require XML::Smart;") { die("XML::Smart not found"); } - if(!eval "require LWP;") { + if(!eval "require LWP;") { die("LWP not found"); } } - + use XML::Smart; use strict; @@ -99,7 +99,7 @@ EOF die("PUDDI PUDDI PUDDI PUDDI PUDDI") unless ($action); # Config -if ($ARGV[0] and $ARGV[0] eq "config") +if ($ARGV[0] and $ARGV[0] eq "config") { my $titles = { network_peers => 'YaCy Network Online Peers', diff --git a/plugins/yum/yum_activity b/plugins/yum/yum_activity index bad9a4dd..e41500f9 100755 --- a/plugins/yum/yum_activity +++ b/plugins/yum/yum_activity @@ -4,19 +4,18 @@ # # Parameters: # -# config -# autoconf +# config +# autoconf # if [ "$1" = "autoconf" ]; then if [ -r /var/log/yum.log ]; then - echo yes - exit 0 + echo yes else echo "no (/var/log/yum.log not readable)" - exit 1 fi + exit 0 fi if [ "$1" = "config" ]; then diff --git a/plugins/zfs/zfs-filesystem-graph b/plugins/zfs/zfs-filesystem-graph index a3bd62d6..57661038 100755 --- a/plugins/zfs/zfs-filesystem-graph +++ b/plugins/zfs/zfs-filesystem-graph @@ -2,12 +2,12 @@ # # Plugin to monitor a ZFS Filesystem # -# Wildcard-plugin to monitor a zfs filesystems. +# Wildcard-plugin to monitor a zfs filesystems. # # To monitor a filesystem, link zfs_fs__ to this file. E.g. # # ln -s /usr/share/munin/node/plugins-auto/zfs_fs_ /etc/munin/node.d/zfs_fs_tank_foo -# +# # ...will monitor tank/foo fs. # # You can monitor zpool as well by a link on zfs_fs_ @@ -16,28 +16,33 @@ # # config (required) # autoconf (optional - used by munin-config) -# +# # ** WARNING ** # For now this plugin does not allow "_" in the name of a zpool or filesystems # +# #%# capabilities=autoconf +# -myname=`basename $0 | sed 's/^zfs_fs_//g' | sed -e 's/_/\//g'` +myname=$(basename "$0" | sed 's/^zfs_fs_//g' | sed -e 's/_/\//g') if [ "$1" = "autoconf" ]; then - # Makes little sense to autoconf if you can't suggest - echo no + if which zfs >/dev/null; then + echo yes + else + echo "no (missing executable 'zfs')" + fi exit 0 fi -if [ "$1" = "suggest" ]; then - exit 0 -fi -values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $myname | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') ) +read -r -a values <<<"$(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota "$myname" \ + | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}')" + if [ "$1" = "config" ]; then - echo < /usr/local/bin/bash =head1 ENVIRONMENT VARIABLES - + None =head1 AUTHOR @@ -66,7 +66,7 @@ get_osname() { SunOS) case $osver in illumos*) - osname=illumos + osname=illumos ;; esac ;; @@ -84,7 +84,7 @@ preconfig() { case $func in size) global_attr=" - graph_title ZFS ARC - Size + graph_title ZFS ARC - Size graph_category fs graph_args --base 1024 --lower-limit 0 graph_vlabel Bytes @@ -150,7 +150,7 @@ preconfig() { " data_attr=" mfu_ghost_hits DERIVE LINE dummy - mfu_hits DERIVE LINE MFU + mfu_hits DERIVE LINE MFU mru_ghost_hits DERIVE LINE dummy mru_hits DERIVE LINE MRU " @@ -219,12 +219,12 @@ preconfig() { echo "Unknown function: $func" exit 1 ;; - 
esac + esac } do_config() { local func=$1 - local label_max_length=45 + local label_max_length=45 local field type draw label preconfig "$func" @@ -254,7 +254,7 @@ do_config() { get_stats() { local arcstats stat value - + case $osname in SunOS|illumos) arcstats=$( kstat -p 'zfs:0:arcstats' | sed -e 's/:/ /g' | awk '{ print $4,$5 }' ) @@ -286,8 +286,8 @@ get_stats() { esac while read -r stat value - do - printf -v "arcstats_${stat}" "%s" "$value" + do + printf -v "arcstats_${stat}" "%s" "$value" # printf -v means indirect variable assignment (similar to eval) done <<< "$arcstats" } @@ -303,8 +303,8 @@ do_fetch() { do [ -z "$field" ] && continue - ref="arcstats_${field}" - value=${!ref:-0} + ref="arcstats_${field}" + value=${!ref:-0} # ${!varname} means indirect evaluation (similar to eval) echo "${field}.value ${value}" @@ -351,7 +351,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/zfs/zfs_cache_efficiency b/plugins/zfs/zfs_cache_efficiency index e793ca6e..c65c7f77 100755 --- a/plugins/zfs/zfs_cache_efficiency +++ b/plugins/zfs/zfs_cache_efficiency @@ -16,7 +16,7 @@ graph_title ZFS ARC efficiency graph_vlabel % graph_scale no graph_category fs -graph_args -l 0 +graph_args -l 0 graph_info FreeBSD ZFS ARC Utilisation - Counters graph_period minute graph_order hits misses l2_hits l2_misses mfu_hits mru_hits arc_access_total mfu_hits_perc mru_hits_perc arc_misses_perc l2_efficency_tot @@ -216,22 +216,18 @@ EOF autoconf) if [ ! -x ${sysctl} ]; then echo "no (${sysctl} is not executable)" - exit 1 - fi - if [ ${ostype} = "FreeBSD" ]; then + elif [ "${ostype}" = "FreeBSD" ]; then echo "yes" - exit 0 - fi - if [ ${ostype} = "Linux" ]; then + elif [ "${ostype}" = "Linux" ]; then if [ -f ${procfile} ]; then echo "yes" - exit 0 + else + echo "no (The statsfile does not exist: ${procfile})" fi - echo "no (The statsfile does not exist: ${procfile})" - exit 1 + else + echo "no (Your OS is not supported by this plugin)" fi - echo "no (You're OS is not supported by this plugin)" - exit 1 + exit 0 ;; suggest) exit 0 diff --git a/plugins/zfs/zfs_list b/plugins/zfs/zfs_list index 72134b74..7307f9d5 100755 --- a/plugins/zfs/zfs_list +++ b/plugins/zfs/zfs_list @@ -16,6 +16,8 @@ # #%# family=auto +. 
"$MUNIN_LIBDIR/plugins/plugin.sh" + need_multigraph() if [ "$1" = "autoconf" ]; then @@ -31,7 +33,7 @@ fi if [ "$1" = "config" ]; then for i in `zfs list -Hp | awk '{print $1}'`; do values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $i | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') ) - fsname=`echo $i | sed 's/\//__/g'` + fsname=$(clean_fieldname $(echo "$i" | sed 's/\//__/g')) echo <{$name}->{$key}=$value; } @@ -169,7 +173,11 @@ sub do_collect { sub do_config_fs { my ($fs) = @_; my $fs_slash = ($fs); + # try to restore underscores (see "do_collect" for the reverse operation): + # * substitute all underscores with slashes + # * afterwards transform *double* slashes back into a single underscore ($fs_slash =~ s/_/\//g); + ($fs_slash =~ s/\/\//_/g); if ( $fs ne $zpool ) { printf( "multigraph zfs_usage_%s.%s\n", diff --git a/plugins/zfs/zfsarcstats-counters b/plugins/zfs/zfsarcstats-counters index 594b3d31..04106875 100755 --- a/plugins/zfs/zfsarcstats-counters +++ b/plugins/zfs/zfsarcstats-counters @@ -23,15 +23,15 @@ $sysctl kstat.zfs.misc.arcstats.hits kstat.zfs.misc.arcstats | awk '/hits/ || /m autoconf) if [ ! -x ${sysctl} ]; then echo "no (${sysctl} is not executable)" - exit 1 + exit 0 fi ostype=`uname -s` if [ ${ostype} = "FreeBSD" ]; then echo "yes" - exit 0 + else + echo "no (Your OS is not supported by this plugin)" fi - echo "no (You're OS is not supported by this plugin)" - exit 1 + exit 0 ;; suggest) exit 0 diff --git a/plugins/zfs/zlist b/plugins/zfs/zlist index 161a23bc..7c27f986 100755 --- a/plugins/zfs/zlist +++ b/plugins/zfs/zlist @@ -29,7 +29,7 @@ fi case $i in *) name=`echo $i | awk '{ gsub("[^a-zA-Z0-9_]", "_", $1); print $1 }'` ;; esac - echo -n "$name.value " + echo -n "$name.value " echo $i|awk '{ print $4}' | awk ' /T/ {print $1*1000000}; /G/ {print $1*1000}; /M/ {print "777"}; /K/ {print "1"}' | sed 's/^\$name.value //' done diff --git a/plugins/zfs/zpool_capacity b/plugins/zfs/zpool_capacity index 70411ae7..d59080f1 100755 --- a/plugins/zfs/zpool_capacity +++ b/plugins/zfs/zpool_capacity @@ -7,9 +7,10 @@ zpool_capacity - Munin plugin to monitor ZFS capacity These functions are implemented: - capacity : to monitor zpool capacity % - allocated : to monitor zpool allocated bytes - dedup : to monitor zpool dedup and compress ratio + capacity : to monitor zpool capacity % + fragmentation : to monitor zpool fragmentation % + allocated : to monitor zpool allocated bytes + dedup : to monitor zpool dedup and compress ratio Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7 @@ -26,7 +27,7 @@ user root =head1 ENVIRONMENT VARIABLES - + critical : default 90 warning : default 80 @@ -34,6 +35,8 @@ K.Cima https://github.com/shakemid + Fragmentation graph by https://github.com/rantal + =head1 LICENSE GPLv2 @@ -54,7 +57,7 @@ set -o nounset # Global variables plugin_name=zpool_capacity -functions='capacity allocated dedup' +functions='capacity fragmentation allocated dedup' zpool_cmd=/sbin/zpool zfs_cmd=/sbin/zfs @@ -92,6 +95,20 @@ preconfig() { ${p} GAUGE LINE2 ${p}" done ;; + fragmentation) + global_attr=" + graph_title ZFS storage pool - Fragmentation + graph_category fs + graph_args --base 1000 --lower-limit 0 --upper-limit 100 + graph_vlabel % fragmentation + graph_info ZFS storage pool - Fragmentation + " + for p in $pool_list + do + data_attr="${data_attr} + ${p} GAUGE LINE2 ${p}" + done + ;; allocated) global_attr=" graph_title ZFS storage pool 
- Allocated bytes @@ -132,7 +149,7 @@ preconfig() { do_config() { local func="$1" - local label_max_length=45 + local label_max_length=45 local field type draw label preconfig "$func" @@ -168,6 +185,9 @@ get_stats() { capacity) "$zpool_cmd" list -H -o name,capacity | sed 's/%$//' ;; + fragmentation) + "$zpool_cmd" list -H -o name,fragmentation | sed 's/%$//' + ;; allocated) ( "$zpool_cmd" list -H -o name,allocated \ | awk '{ print $1"_allocated", $2 }' @@ -257,7 +277,7 @@ autoconf) ;; config) config - [ "${MUNIN_CAP_DIRTYCONFIG:-}" = "1" ] && fetch + if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi ;; *) fetch diff --git a/plugins/zfs/zpool_iostat b/plugins/zfs/zpool_iostat index 63400422..b209384d 100755 --- a/plugins/zfs/zpool_iostat +++ b/plugins/zfs/zpool_iostat @@ -60,7 +60,7 @@ fi zlines=$("$ZPOOL_BIN" iostat -v | wc -l | sed 's/ //g') iostats=$("$ZPOOL_BIN" iostat -v 1 1 | tail "-$zlines") zlist=$(echo "$iostats" \ - | gawk '/alloc/ {next}; /avail/ {next}; /raid/ {next}; /mirror/ {next}; + | awk '/alloc/ {next}; /avail/ {next}; /raid/ {next}; /mirror/ {next}; { if ( $4 >=0 ) print $1}' \ | tr ' ' '\n') @@ -71,10 +71,9 @@ get_device_iostat_column() { local stat_column="$2" # convert all numeric values into kB echo "$iostats" \ - | gawk '{ if ($1 == "'"$device_label"'") - print $'"$stat_column"'; }' \ - | gawk '/M/ {print strtonum($1)*1000}; - /K/ {print strtonum($1)}; + | awk '{ if ($1 == "'"$device_label"'") print $'"$stat_column"'; }' \ + | awk '/M/ {print int($1)*1000}; + /K/ {print int($1)}; /[0-9]$/ {print int($1)/1000}' } diff --git a/plugins/znc/README.md b/plugins/znc/README.md index 0f7204fc..497ef8eb 100644 --- a/plugins/znc/README.md +++ b/plugins/znc/README.md @@ -6,7 +6,7 @@ ZNC-Logs 2. Enable the log-plugin in znc (if you enable it for the complete instance, there will be some issues because this plugin only uses the network-name + channel-name, if there are some networks with the same name, it will count all lines together) -3. Add this to your `/etc/munin/plugin-conf.d/munin-node` +3. Add this to your `/etc/munin/plugin-conf.d/munin-node` ``` [znc_logs] user $your_znc_user diff --git a/plugins/znc/znc_logs.py b/plugins/znc/znc_logs.py index c2812e4a..75a9671b 100755 --- a/plugins/znc/znc_logs.py +++ b/plugins/znc/znc_logs.py @@ -159,7 +159,7 @@ def emit_config(): print('graph_period minute') graph_order = [] - if 'MUNIN_CAP_DIRTYCONFIG' in os.environ and os.environ['MUNIN_CAP_DIRTYCONFIG'] == 1: + if os.getenv('MUNIN_CAP_DIRTYCONFIG') == "1": read_data(1) else: read_data(0) diff --git a/plugins/zope/README-zodb b/plugins/zope/README-zodb index d1889cbf..ecd70dd0 100644 --- a/plugins/zope/README-zodb +++ b/plugins/zope/README-zodb @@ -17,7 +17,7 @@ wget --delete-after -q -O - localhost:8080//munin_db_activity.py wget --delete-after -q -O - localhost:8080//munin_cache_parameters.py Edit zope_db_activity and zope_cache_parameters scripts to reflect - and your instance(s) ports. + and your instance(s) ports. Then move them into your munin plugins lib, and create a symlink under etc according to the usual munin custom: @@ -33,4 +33,4 @@ Test them with: Restart your munin-node and the graphs should appear shortly. 
If you make any improvements (which should be easy), please update -the munin exhange entry, or mail me at gaute(at)pht(dot)no \ No newline at end of file +the munin exhange entry, or mail me at gaute(at)pht(dot)no diff --git a/plugins/zope/zeo_monitor_ b/plugins/zope/zeo_monitor_ index 60b59f48..8ad8cac1 100755 --- a/plugins/zope/zeo_monitor_ +++ b/plugins/zope/zeo_monitor_ @@ -11,7 +11,7 @@ # Reads has logarithmic scale # Adjust some scaling factors # Support for INET connections, the new default -# +# # Invoke using symlinks to zeo_monitor_ in the form zeo_monitor_{clients,reads,writes,errors}_ # # This plugin can be configured manually or by autoconf (provided that the ZEO @@ -26,7 +26,7 @@ # ln -s /usr/share/munin/plugins/zeo_monitor_ /etc/munin/plugins/zeo_monitor_reads_temp # ln -s /usr/share/munin/plugins/zeo_monitor_ /etc/munin/plugins/zeo_monitor_writes_1 # ln -s /usr/share/munin/plugins/zeo_monitor_ /etc/munin/plugins/zeo_monitor_writes_temp -# +# # # Configuration variables: # @@ -131,7 +131,7 @@ die qq(Symlink to this script by appending a mode and storage name such as "zeo_ ########## if ( $ARGV[0] and $ARGV[0] eq "config") { - + print <<"EOF"; graph_title ZEO $mode for storage $storage_name graph_args --base 1000 --lower-limit 0 @@ -258,7 +258,7 @@ sub parse_zeo_monitor { ) \n /sx; - + my %name_var=( 'Clients' => 'clients', 'Commits' => 'commits', @@ -268,7 +268,7 @@ sub parse_zeo_monitor { 'Conflicts' => 'conflicts', 'Conflicts resolved' => 'conflictsres', ); - + my %stats=(); foreach (split /\n/, $stats) { (my $name,my $value)=split ': ',$_,2; @@ -276,7 +276,7 @@ sub parse_zeo_monitor { next unless $var; $stats{$var}=$value; } - + return %stats; } diff --git a/plugins/zope/zope_cache_parameters b/plugins/zope/zope_cache_parameters index a7b30700..268dc7fc 100755 --- a/plugins/zope/zope_cache_parameters +++ b/plugins/zope/zope_cache_parameters @@ -13,7 +13,7 @@ url = "/munin_cache_parameters.py" if len(argv) > 1 and argv[1] == 'config': - # there is probably a better way to display this cached vs targed graph + # there is probably a better way to display this cached vs target graph # as a percentage of target possibly.. 
print """graph_title Zope cache parameters @@ -37,7 +37,7 @@ else: print 'obs_in_db%(i)s.value' % id, obs_in_db print 'obs_cached%(i)s.value'% id, obs_cached print 'obs_target%(i)s.value'% id, obs_target - + diff --git a/plugins/zope/zope_conflict_errors b/plugins/zope/zope_conflict_errors index 59ad66a6..5458d750 100755 --- a/plugins/zope/zope_conflict_errors +++ b/plugins/zope/zope_conflict_errors @@ -32,8 +32,8 @@ import time if len(argv) > 1: if argv[1] == 'config': - print """graph_title Zope Conflict Errors - graph_vlabel Count + print """graph_title Zope Conflict Errors + graph_vlabel Count graph_category appserver graph_info The number of conflict errors in event logs over the past 24h""".replace("\n ","\n") for i in range(0,len(logs)): @@ -46,13 +46,13 @@ else: log = logs[i] error_count = 0 for line in file(log): - splitted = line.split() - if 'ConflictError' in splitted: - logdate = datetime(*time.strptime(splitted[0], date_format)[:-3]) + tokens = line.split() + if 'ConflictError' in tokens: + logdate = datetime(*time.strptime(tokens[0], date_format)[:-3]) delta = datetime.now() - logdate if delta.days >= 1: continue - error_count += 1 + error_count += 1 id = dict(i=i) print 'error_count%(i)s.value' % id, error_count - + diff --git a/plugins/zope/zope_db_activity b/plugins/zope/zope_db_activity index ddcbbef6..7991ee5a 100755 --- a/plugins/zope/zope_db_activity +++ b/plugins/zope/zope_db_activity @@ -35,7 +35,7 @@ else: print 'load_count%(i)s.value' % id, total_load_count print 'store_count%(i)s.value'% id, total_store_count print 'connections%(i)s.value'% id, total_connections - + diff --git a/t/test-exception-wrapper b/t/test-exception-wrapper new file mode 100755 index 00000000..522f1a89 --- /dev/null +++ b/t/test-exception-wrapper @@ -0,0 +1,55 @@ +#!/bin/sh +# +# Run a check for a given file. A failure is tolerated (and expected), if the filename is listed +# in a file containing expected failures. +# +# Parameters: SCRIPT_FILENAME TEST_COMMAND [TEST_COMMAND_ARGUMENTS] +# +# See EXPECTED_FAILURES_LIST_FILENAME for the filename pattern of a file, containing the relative +# names of all scripts, that are expected to fail. This wrapper script will fail, if the exit +# status of the test does not match the expectated result (i.e. it fails but should pass or it +# passes while being listed in the EXPECTED_FAILURES_LIST_FILENAME file). +# + +set -eu + +[ $# -lt 2 ] && echo >&2 "Insufficient number of arguments: expecting SCRIPT_FILENAME and one or more COMMAND tokens" && exit 1 + + +SCRIPT_FILENAME="$1" +shift +EXPECTED_FAILURES_LIST_FILENAME="$0.expected-failures" + +REPOSITORY_DIR=$(cd "$(dirname "$0")/.." && pwd) + + +[ ! 
-e "$SCRIPT_FILENAME" ] && echo >&2 "Failed to find script: $SCRIPT_FILENAME" && exit 3 + + +# determine, whether the script is mentioned in the exclusion file +relative_script_filename=$(realpath --relative-to "$REPOSITORY_DIR" "$SCRIPT_FILENAME") +if grep --quiet --line-regexp --fixed-strings --no-messages "$relative_script_filename" "$EXPECTED_FAILURES_LIST_FILENAME"; then + is_expected_to_fail=1 +else + is_expected_to_fail=0 +fi + +# check the returncode of the test +if "$@" "$SCRIPT_FILENAME"; then + has_failed=0 +else + has_failed=1 +fi + + +# complain, if the result did not meet our expectation +if [ "$is_expected_to_fail" = "$has_failed" ]; then + # the check returned the expected result + exit 0 +elif [ "$has_failed" = "1" ]; then + echo >&2 "ERROR: the script '$SCRIPT_FILENAME' should pass the test, but it failed" + exit 4 +else + echo >&2 "ERROR: the script '$SCRIPT_FILENAME' was expected to fail the test, but it succeeded. Please remove this filename from the list of exepected failures ($EXPECTED_FAILURES_LIST_FILENAME)." + exit 5 +fi diff --git a/t/test-exception-wrapper.expected-failures b/t/test-exception-wrapper.expected-failures new file mode 100644 index 00000000..b0bdca69 --- /dev/null +++ b/t/test-exception-wrapper.expected-failures @@ -0,0 +1,507 @@ +plugins/accounting/accounting_ +plugins/amavis/amavis_awk +plugins/amavis/amavis-debian +plugins/amule/amule_queue +plugins/amule/amule_shares +plugins/amule/amule_transfers +plugins/amule/amule_uptime +plugins/apache/apache_cache_disk_count +plugins/apache/apache_threads +plugins/apache/apache_users +plugins/apache/page_load +plugins/apache/qpid_bytedepth +plugins/apache/qpid_discardsring +plugins/apache/qpid_enqueuebytes +plugins/apache/qpid_enqueuecount +plugins/apache/qpid_msgdepth +plugins/apt/deb_packages/deb_packages.py +plugins/arangodb/arangodb_ +plugins/arp/arp_ +plugins/arp/arp_bsd_ +plugins/asterisk/asterisk_channels +plugins/asterisk/asterisk_inuse +plugins/backuppc/backuppc +plugins/battery/acpi-battery +plugins/battery/battery_ +plugins/bigbrother/b3error_ +plugins/bind/bind9_resolver_stats +plugins/bind/bind9_server_stats +plugins/bind/bind9_socket_stats +plugins/bsd/df_abs_bsd +plugins/bsd/df_bsd +plugins/bsd/freebsd_hdd_power_state +plugins/bsd/netstat_bsd_m_ +plugins/bsd/spamd-blacklist-bsd +plugins/bsd/spamd-tarpit-bsd +plugins/cacti/cacti-host +plugins/cacti/cacti_poller_time +plugins/cacti/cacti_rrds +plugins/celery/celery_tasks +plugins/celery/celery_tasks_states +plugins/ceph/ceph-osd-info +plugins/change.org/changeorg_signature_count +plugins/chat/tinychat_users_ +plugins/cherokee/munin-plugin-for-cherokee +plugins/chilli/chilli_sessions_ +plugins/chrony/chrony +plugins/clamav/clamav +plugins/condor/condor_activity_ +plugins/condor/condor_ops_ +plugins/condor/condor_queue_ +plugins/condor/condor_states_ +plugins/courier/courier_log +plugins/cpu/cpu_ +plugins/cpu/cpuload_ +plugins/cpu/cpuutil +plugins/cpu/multicpu1sec +plugins/cpu/process_count +plugins/cpu/process_cpushare +plugins/currency/bitcoin/slush_hashrate_ +plugins/currency/bitcoin/slush_reward_ +plugins/currency/nanopool/nanopool_ +plugins/currency/zcash/zcash_flypool_hashrate_ +plugins/cyrus/cyrus-imapd +plugins/db2/db2_cnx +plugins/dhcp/dhcpd-pools +plugins/disk/df_with_nfs +plugins/disk/dm_cache_occupancy_ +plugins/disk/dm_cache_statistics_ +plugins/disk/du +plugins/disk/du_multidirs +plugins/disk/du_pattern +plugins/disk/e2 +plugins/disk/file_age +plugins/disk/freedisk +plugins/disk/hdsentinel +plugins/disk/iostat-xfrs 
+plugins/disk/log_sizes +plugins/disk/lvm_ +plugins/disk/lvm_snap_used +plugins/disk/megaraid-hdd-temperature-using-megacli +plugins/disk/quota2percent_ +plugins/disk/raid-mismatch-count +plugins/disk/scsi_queue +plugins/disk/snmp__areca_ +plugins/disk/useddisk +plugins/dkim/dkimproxy_mails +plugins/dovecot/dovecot +plugins/dovecot/dovecot_stats_ +plugins/dspam/dspam_ +plugins/dspam/dspam_activity +plugins/dvb/2wcomdsr_ +plugins/dvb/femon +plugins/ejabberd/ejabberd_resources_ +plugins/ejabberd/ejabberd_scanlog +plugins/emc/emc_vnx_block_lun_perfdata +plugins/emc/emc_vnx_file_ +plugins/fan/dell_fans +plugins/fan/ibmfan +plugins/fax/faxstat +plugins/firebird/firebird +plugins/freeradius/freeradius +plugins/freeradius/freeradius_queue +plugins/ftp/pure-ftpd +plugins/ftp/pure-ftpd-bw +plugins/ftp/pureftpd_count +plugins/ftp/pure-ftpd-logs +plugins/ftp/vsftpd-rel +plugins/games/cstat +plugins/git/git_commit_behind +plugins/glance/glance_size_ +plugins/glance/glance_status +plugins/glassfish/glassfish_counters_ +plugins/google/googlecode +plugins/google/google-rank +plugins/gpu/amd_gpu_ +plugins/gunicorn/gunicorn_memory_status +plugins/gunicorn/gunicorn_status +plugins/haproxy/haproxy_abort_backend +plugins/haproxy/haproxy_active_backend +plugins/haproxy/haproxy-bytes +plugins/haproxy/haproxy_bytes_backend +plugins/haproxy/haproxy_bytes_compressor_backend +plugins/haproxy/haproxy_bytes_compressor_frontend +plugins/haproxy/haproxy_bytes_frontend +plugins/haproxy/haproxy-connection-errors +plugins/haproxy/haproxy_denied_backend +plugins/haproxy/haproxy_denied_frontend +plugins/haproxy/haproxy-downtime +plugins/haproxy/haproxy-errors +plugins/haproxy/haproxy_errors_backend +plugins/haproxy/haproxy_errors_frontend +plugins/haproxy/haproxy-failed-checks +plugins/haproxy/haproxy_queue_backend +plugins/haproxy/haproxy_rate_backend +plugins/haproxy/haproxy_rate_frontend +plugins/haproxy/haproxy_reqrate_frontend +plugins/haproxy/haproxy_response_compressor_backend +plugins/haproxy/haproxy_response_compressor_frontend +plugins/haproxy/haproxy-response-errors +plugins/haproxy/haproxy_responses_backend +plugins/haproxy/haproxy_responses_frontend +plugins/haproxy/haproxy-sessions +plugins/haproxy/haproxy_sessions_backend +plugins/haproxy/haproxy-sessions-by-servers +plugins/haproxy/haproxy_sessions_frontend +plugins/haproxy/haproxy_sessions_total_backend +plugins/haproxy/haproxy_sessions_total_frontend +plugins/haproxy/haproxy_warnings_backend +plugins/harddisks/powered_up_ +plugins/horde/horde +plugins/http/http_pagespeed +plugins/http/http_responsecode +plugins/http/mongrel_memory +plugins/http/mongrel_process_memory +plugins/http/vhost_requests_ +plugins/http/wget_page +plugins/i2p/i2p_ +plugins/icecast/icecast_ +plugins/icecast/icecast2 +plugins/icecast/icecast2_simple +plugins/icecast/icecast2_stats_ +plugins/imapproxy/imapproxy_multi +plugins/ip6/ip6_ +plugins/ipvs/ipvs_active +plugins/ipvs/ipvs_bps +plugins/ipvs/ipvs_conn +plugins/ipvs/ipvs_cps +plugins/isp/internode_usage +plugins/jmx/plugin/jmx_ +plugins/jvm/jstat__gccount +plugins/jvm/jstat__gctime +plugins/jvm/jstat__heap +plugins/keystone/keystone_stats +plugins/libvirt/kvm_cpu +plugins/libvirt/kvm_io +plugins/libvirt/kvm_mem +plugins/libvirt/kvm_net +plugins/libvirt/munin-libvirtpy +plugins/logins/logins +plugins/logs/service_events +plugins/lxc/lxc_cpu +plugins/lxc/lxc_cpu_time +plugins/lxc/lxc_net +plugins/lxc/lxc_proc +plugins/lxd/lxd_disk +plugins/lxd/lxd_mem +plugins/mail/imap_bandwidth +plugins/mail/mail_connections 
+plugins/mailman/mailman-queue-check +plugins/mail/postfwd-rbl +plugins/memory/kmemsum +plugins/memory/multimemory +plugins/memory/proc_mem +plugins/memory/proc_memory_status +plugins/minecraft/minecraft-users +plugins/mixminion/mixminion +plugins/moblock/moblock_connections +plugins/mod_jk/mod_jk +plugins/moinmoin/moinoin_pages +plugins/mongodb/mongo_btree +plugins/mongodb/mongo_collection_ +plugins/mongodb/mongo_conn +plugins/mongodb/mongo_lag +plugins/mongodb/mongo_lock +plugins/mongodb/mongo_mem +plugins/mongodb/mongo_ops +plugins/mssql/microsoft-sql +plugins/mssql/microsoft-sql-buffer-cache-hit-ratio +plugins/mssql/microsoft-sql-data-file-sizes +plugins/mssql/microsoft-sql-log-file-size +plugins/mumble/mumble_users +plugins/mumble/murmur-stats +plugins/munin/healthcheck_log +plugins/munin/healthcheck_process +plugins/munin/healthcheck_url +plugins/munin/update +plugins/mysql/hs_read +plugins/mysql/hs_write +plugins/mysql/mysql_aggregate_ +plugins/mysql/mysql_report +plugins/mysql/mysql_size_ondisk +plugins/mythtv/dvb-signal +plugins/netapp/snmp__netapp_cifs +plugins/netapp/snmp__netapp_cpu +plugins/network/bgpd +plugins/network/brc_rssi +plugins/network/ddclient +plugins/network/denyhosts +plugins/network/ethtool_ +plugins/network/fwbuilder_ +plugins/network/hfsc +plugins/network/hfsc_sep +plugins/network/host_traffic +plugins/network/http__tp_link +plugins/network/if1sec_ +plugins/network/ifem_ +plugins/network/if_uptime +plugins/network/iperf_ +plugins/network/ip_forward_ +plugins/network/ipfwcnt_ +plugins/network/ipfwnat_ +plugins/network/ipt_accounting_ +plugins/network/ipt_basic_ +plugins/network/ldap_connections +plugins/network/linux_if/linux_if +plugins/network/mtr100_ +plugins/network/netatalk +plugins/network/netatalk3 +plugins/network/net_hosts_ +plugins/network/netstat_s_ +plugins/network/nsd3 +plugins/network/olsrd +plugins/network/packetloss +plugins/network/proc_netstat +plugins/network/psad +plugins/network/shorewall_ +plugins/network/shorewall_acc +plugins/network/shorewall-accounting_ +plugins/network/smtp_hello_ +plugins/network/sockstat +plugins/network/tc_ +plugins/network/tc_drops_ +plugins/network/tc_packets_ +plugins/network/traffic +plugins/network/traffic_ipt +plugins/network/transmission +plugins/network/umts_sig +plugins/network/vnstat +plugins/network/vnstat_ +plugins/nfs/nfsv4 +plugins/nginx/nginx-cache-multi_ +plugins/nginx/nginx_upstream +plugins/nginx/nginx_vhost_traffic +plugins/nginx/nginx_working_set +plugins/nova/nova_floating_ips +plugins/nova/nova_instance_ +plugins/nova/nova_instance_launched +plugins/nova/nova_instance_timing +plugins/nova/nova_services +plugins/ntp/ntpdate_ +plugins/ntp/ntp_kernel_pll_prec +plugins/ntp/ntp_pool_score_ +plugins/openntpd/openntp_offset +plugins/openvz/openvz_ +plugins/oracle/oracle_connections +plugins/oracle/oracle-pga-monitor +plugins/oracle/oracle-sga +plugins/oracle/oracle_sysstat +plugins/ossec/ossec_active_response +plugins/ossec/ossec_agents +plugins/ossec/ossec_alerts +plugins/other/beanstalkd +plugins/other/cm2 +plugins/other/delayed_jobs_queue_size +plugins/other/earthquakes-stronger-than-m4-and-m5-plus-solar-act +plugins/other/foldingathome_activecpu +plugins/other/globesurfer +plugins/other/hookbox +plugins/other/listeners +plugins/other/pid +plugins/other/port_ +plugins/other/s9y +plugins/other/services +plugins/passenger/passenger_memory +plugins/passenger/passenger_processes +plugins/passenger/passenger_status +plugins/pdns/pdns_errors +plugins/pdns/pdns_queries +plugins/pdns/pdns_rec_qtypes 
+plugins/pf/pf +plugins/pf/pf_bytes +plugins/pf/pf_ipv4_ipv6_packets +plugins/pf/pf_openbsd +plugins/pf/pf_packets +plugins/pf/pf_states +plugins/php/eaccelerator +plugins/php/eaccelerator-python +plugins/php/php5-fpm_status +plugins/php/php_apc_ +plugins/php/php-cgi +plugins/php/php_eaccelerator +plugins/php/php_errors_ +plugins/php/php_opcache +plugins/php/php_sessions +plugins/php/php_time_execution +plugins/ping/fping_ +plugins/ping/pinger +plugins/ping/ping_host +plugins/ping/ping-with-ceil +plugins/poseidon/snmp__poseidon-sensors +plugins/postfix/greyfix +plugins/postfix/policyd-spf-python +plugins/postfix/postfix_filtered +plugins/postfix/postfix_filtered_awk +plugins/postfix/postfix_mailfiltered +plugins/postfix/postfix_mailqueue_ +plugins/postfix/postfix_mail_stats +plugins/postfix/postfix_mail_stats1 +plugins/postfix/postfix-policyd +plugins/postfix/postfix-queue-size +plugins/postfix/postfix-rbl-blocked-mails +plugins/postfix/postgrey +plugins/postgresql/pgbouncer_client_connections +plugins/postgresql/pgbouncer_maxwait +plugins/postgresql/pgbouncer_server_connections +plugins/postgresql/postgresql_active_backends +plugins/postgresql/postgresql_active_backends_by_database +plugins/postgresql/postgresql_active_locks +plugins/postgresql/postgresql_database_ratio +plugins/postgresql/postgresql_database_size +plugins/postgresql/postgresql_tablespace_size +plugins/postgresql/postgres_queries2_ +plugins/postgresql/slony_lag_events_ +plugins/postgresql/slony_lag_time +plugins/postgresql/slony_lag_time_ +plugins/power5/consumed_cpu_cycles +plugins/power5/cpu_entitlemens +plugins/power5/cpu_in_lpar +plugins/power5/weight_of_a_lpar +plugins/power/apc_status +plugins/power/eatonups_ +plugins/powermta/pmta_ +plugins/powermta/powermta_vmta_recpients +plugins/power/nut +plugins/printer/dell_5310n_health_ +plugins/printer/dell_5310n_pages_ +plugins/printer/hp2600_count_ +plugins/printer/hp2600_status_ +plugins/printer/oki_c5500_health_ +plugins/printer/oki_c5500_pages_ +plugins/printer/toshiba_5520c_byfunction_black_ +plugins/printer/toshiba_5520c_byfunction_fullcolor_ +plugins/printer/toshiba_5520c_print_ +plugins/printer/toshiba_5520c_scan_ +plugins/printer/xerox-wc3220 +plugins/printer/xerox-wc7232-consumables +plugins/puma/puma_ +plugins/puppet/puppet_runtime +plugins/qmail/qmailconn +plugins/qmail/queuestats +plugins/qmail/spamdyke +plugins/rabbitmq/rabbitmq_consumers +plugins/rabbitmq/rabbitmq_messages +plugins/rabbitmq/rabbitmq_messages_unacknowledged +plugins/rabbitmq/rabbitmq_messages_uncommitted +plugins/rabbitmq/rabbitmq_queue_memory +plugins/radiator/radiator_acct_lag +plugins/radiator/radiator_acct_ppm +plugins/radiator/radiator_auth_lag +plugins/radiator/radiator_auth_ppm +plugins/raspberry-pi/cpu_freq_1sec +plugins/raspberry-pi/w1_ +plugins/requesttracker/rt_ticket_loadtime +plugins/rethinkdb/rethinkdb_node_io +plugins/riak/riak_fsm_time_95 +plugins/riak/riak_memory +plugins/riak/riak_node +plugins/router/ag241-adsl +plugins/router/arris-tm502g_ +plugins/router/bbox +plugins/router/cisco-epc3010_ +plugins/router/conexant_adsl +plugins/router/dartybox +plugins/router/d-link-dir-655-router-statistics-plugin +plugins/router/dsl-connection-speed +plugins/router/dsl-stats +plugins/router/freeboxuptime +plugins/router/motorola_sb6141 +plugins/router/snmp__juniper +plugins/router/snmp__juniper_spu +plugins/router/snmp__linksys_poe +plugins/router/speedport_300 +plugins/rsync/rsyncd_bytes +plugins/rsync/rsyncd_count +plugins/sar/iostat-cputps-average +plugins/scalix/scalix_clients 
+plugins/scalix/scalix_processes +plugins/scalix/scalix_queues +plugins/senderbase/senderbase +plugins/senderscore/senderscore +plugins/sensors/alertme_keyfobsathome +plugins/sensors/alertme_power +plugins/sensors/cleware +plugins/sensors/nvclock +plugins/sensors/sequoia_websens +plugins/sge/sge_job_stats +plugins/sge/sge_queue_ +plugins/sge/sge_queue_xml_ +plugins/smstools/smstools_ +plugins/snmp/snmp___bri_se_ +plugins/snmp/snmp__brocade_ifs +plugins/snmp/snmp__fn +plugins/snmp/snmp_room_alert_ +plugins/snmp/snmp__webthermometer +plugins/solaris/io_disk +plugins/solaris/zones_cpu +plugins/solaris/zones_mem +plugins/spamassasin/sa-learn +plugins/sphinx/sphinx_documents +plugins/squid/squid_efficiency +plugins/squid/squid_times +plugins/ssh/openssh-denyhosts +plugins/ssh/sshd_invalid_countries_ruby +plugins/ssh/sshd_log +plugins/streaming/packetship_ +plugins/swift/swift-async_ +plugins/swift/swift-quarantined_ +plugins/swift/swift-replication-time_ +plugins/system/auth +plugins/system/blockhosts +plugins/systemd/systemd_units +plugins/system/file_length_ +plugins/tarsnap/tarsnap +plugins/tcp/tcp-retransmissions +plugins/tcp/tcp-states +plugins/thin/thin_memory +plugins/thin/thins_peak_memory +plugins/thin/thin_threads +plugins/tinydns/tinydns +plugins/tinydns/tinydns_err +plugins/tor/tor_traffic +plugins/trafic_ro/trafic_ro_24h +plugins/tv/hdhomerun_ +plugins/twemproxy/nutcracker_requests_ +plugins/unicorn/unicorn_ +plugins/unicorn/unicorn_memory_status +plugins/unicorn/unicorn_status +plugins/user/membyuser +plugins/user/multipsu +plugins/user/system_users +plugins/uwsgi/uwsgi_ +plugins/varnish/varnish_devicedetect +plugins/vmware/esxcli_env_ +plugins/vmware/esxi +plugins/vmware/esxi__sensors +plugins/vmware/vm_cpu_load +plugins/voldemort/voldemort +plugins/vpn/openvpn_as_mtime +plugins/vpn/openvpn_as_traffic +plugins/vpn/openvpn_as_ttime +plugins/vpn/openvpn_as_users +plugins/vserver/vserver_limit_hits +plugins/vserver/vserver_limits +plugins/vserver/vserver_procs +plugins/weather/openweather_ +plugins/websphere/webspherelogin +plugins/wifi/ath9k_ +plugins/wifi/wifi_signal +plugins/wordpress/wordpress +plugins/wordpress/wordpress-multisite +plugins/wordpress/wordpress-mu-or-network +plugins/wowza/wowza-media-server +plugins/xastir/xastir +plugins/yum/yum_activity +plugins/zfs/zfsarcstats-counters +plugins/zfs/zfs_cache_efficiency +plugins/zfs/zfs_list +plugins/zfs/zfsonlinux_stats_ +plugins/zfs/zfs_stats_ +plugins/zfs/zlist +plugins/zfs/zpool_iostat +plugins/zimbra/zimbra-mailboxsizes +plugins/znc/znc_logs.py +plugins/zope/zeomonitor +plugins/zope/zope_cache_parameters +plugins/zope/zope_conflict_errors +plugins/zope/zope_db_activity diff --git a/t/test.t b/t/test.t index 08debf70..a65c1220 100644 --- a/t/test.t +++ b/t/test.t @@ -18,13 +18,14 @@ sub wanted { ( ( $dev, $ino, $mode, $nlink, $uid, $gid ) = lstat($_) ) && -f _ + && -s _ && ( ( $interpreter, $arguments ) = hashbang("$_") ) && ($interpreter) && ++$num_plugins && process_file( $_, $name, $interpreter, $arguments ); } -File::Find::find( { wanted => \&wanted }, 'plugins' ); +File::Find::find( { wanted => \&wanted, no_chdir => 1 }, 'plugins' ); sub hashbang { my ($filename) = @_; @@ -48,44 +49,84 @@ sub process_file { my ( $file, $filename, $interpreter, $arguments ) = @_; use v5.10.1; - if ( $interpreter =~ m{/bin/sh} ) { + if ( -r "$file.nocheck") { + SKIP: { + skip( sprintf("\nFile '%s' has a .nocheck flag. Ignoring\n", $file), 1); + pass("Not pretending everything is ok"); + } + } + elsif ( ! 
-x $file ) { + # missing executable flag + diag( + sprintf("\nFile '%s' lacks executable permission bits. Maybe try 'chmod +x $file'?\n", + $file) + ); + } + elsif ( $interpreter =~ m{/bin/sh} ) { subtest $filename => sub { - plan tests => 2; + plan tests => 3; run_check( { command => [ 'sh', '-n', $file ], description => 'sh syntax check' } ); + my $checkbashisms_location = `command -v checkbashisms 2>/dev/null`; + chomp($checkbashisms_location); + my $command; + if ($checkbashisms_location ne "") { + # monkey-patch "checkbashisms" in order to allow "command -v" + # see https://unix.stackexchange.com/a/85250: "command -v" vs. which/hash/... + # see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=733511 + my $run_modified_checkbashisms = q/sed 's#command\\\s+-\[\^p\]#command\s+-[^pvV]#'/ + . " '$checkbashisms_location' | perl - '$file'"; + $command = [ 'sh', '-c', $run_modified_checkbashisms ]; + } else { + # make sure that the non-confusing "checkbashisms not found" message is displayed + $command = [ 'checkbashisms', $file ]; + } run_check( - { command => [ 'checkbashisms', $file ], + { command => $command, description => 'checkbashisms' } ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'shellcheck', '--exclude=SC1090,SC2009,SC2126,SC2230', '--shell=dash' ], + description => 'shellcheck' + } + ); }; } elsif ( $interpreter =~ m{/bin/ksh} ) { - run_check( - { command => [ 'ksh', '-n', $file ], - description => 'ksh syntax check', - filename => $filename - } - ); - } - elsif ( $interpreter =~ m{/bin/zsh} ) { - run_check( - { command => [ 'zsh', '-n', $file ], - description => 'zsh syntax check', - filename => $filename - } - ); + subtest $filename => sub { + plan tests => 2; + run_check( + { command => [ 'ksh', '-n', $file ], + description => 'ksh syntax check', + filename => $filename + } + ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'shellcheck', '--shell=ksh' ], + description => 'shellcheck' + } + ); + } } elsif ( $interpreter =~ m{bash} ) { - run_check( - { command => [ 'bash', '-n', $file ], - description => 'bash syntax check', - filename => $filename - } - ); + subtest $filename => sub { + plan tests => 2; + run_check( + { command => [ 'bash', '-n', $file ], + description => 'bash syntax check', + filename => $filename + } + ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'shellcheck', '--exclude=SC1090,SC2009,SC2126,SC2230', '--shell=bash' ], + description => 'shellcheck' + } + ); + } } elsif ( $interpreter =~ m{/bin/zsh} ) { run_check( @@ -111,20 +152,36 @@ sub process_file { ); } elsif ( $interpreter =~ m{python3} ) { - run_check( - { command => [ 'python3', '-m', 'py_compile', $file ], - description => 'python3 compile', - filename => $filename - } - ); + subtest $filename => sub { + plan tests => 2; + run_check( + { command => [ 'python3', '-m', 'py_compile', $file ], + description => 'python3 compile', + filename => $filename + } + ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'python3', '-m', 'flake8' ], + description => 'python3-flake8' + } + ); + } } elsif ( $interpreter =~ m{python} ) { - run_check( - { command => [ 'python', '-m', 'py_compile', $file ], - description => 'python compile', - filename => $filename - } - ); + subtest $filename => sub { + plan tests => 2; + run_check( + { command => [ 'python', '-m', 'py_compile', $file ], + description => 'python compile', + filename => $filename + } + ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'python', '-m', 
'flake8' ], + description => 'python-flake8' + } + ); + } } elsif ( $interpreter =~ m{php} ) { run_check( @@ -135,12 +192,21 @@ sub process_file { ); } elsif ( $interpreter =~ m{j?ruby} ) { - run_check( - { command => [ 'ruby', '-cw', $file ], - description => 'ruby syntax check', - filename => $filename - } - ); + subtest $filename => sub { + plan tests => 2; + run_check( + { command => [ 'ruby', '-cw', $file ], + description => 'ruby syntax check', + filename => $filename + } + ); + run_check( + { command => [ 't/test-exception-wrapper', $file, 'rubocop' ], + description => 'ruby style and syntax check', + filename => $filename + } + ); + } } elsif ( $interpreter =~ m{gawk} ) { run_check( diff --git a/templates/munstrap/static/css/style-munstrap.css b/templates/munstrap/static/css/style-munstrap.css index ae76f730..89210370 100644 --- a/templates/munstrap/static/css/style-munstrap.css +++ b/templates/munstrap/static/css/style-munstrap.css @@ -96,6 +96,7 @@ ul.groupview, ul.groupview ul { } img { + box-sizing: content-box; border: 2px solid transparent; } @@ -117,4 +118,4 @@ img.unkn { .text-critical:hover { color: #843534; -} \ No newline at end of file +} diff --git a/templates/munstrap/static/css/style-munstrap.min.css b/templates/munstrap/static/css/style-munstrap.min.css index 8c7da5e5..ae3ad298 100644 --- a/templates/munstrap/static/css/style-munstrap.min.css +++ b/templates/munstrap/static/css/style-munstrap.min.css @@ -1 +1 @@ -@media(min-width:992px){.modal-lg{width:940px !important}}body{padding-top:70px}img.i{display:block;margin:10px auto}img.img-zoom{cursor:pointer}div.service-alert{margin-top:10px}img#zoom_image{margin-bottom:15px}.link-domain{font-size:1.4em;color:#606}.link-host{font-size:1.1em;color:purple}ul.groupview,ul.groupview ul{list-style-type:none}.munin-icon{background:url(../img/logo-munin.png) left top;margin-top:-6px;width:35px;height:35px;display:block;float:left}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px;border-radius:0 6px 6px 6px}.dropdown-submenu:hover>.dropdown-menu{display:block}.dropdown-submenu>a:after{display:block;content:" ";float:right;width:0;height:0;border:5px solid transparent;border-right-width:0;border-left-color:#ccc;margin-top:5px;margin-right:-10px}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown-submenu.pull-left{float:none}.dropdown-submenu.pull-left>.dropdown-menu{left:-100%;margin-left:10px;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}img{border:2px solid transparent}img.warn{border:2px solid #8a6d3b}img.crit{border:2px solid #a94442}img.unkn{border:2px solid #fa0}.text-critical{color:#a94442}.text-critical:hover{color:#843534} \ No newline at end of file +@media(min-width:992px){.modal-lg{width:940px!important}}body{padding-top:70px}img.i{display:block;margin:10px auto}img.img-zoom{cursor:pointer}div.service-alert{margin-top:10px}img#zoom_image{margin-bottom:15px}.link-domain{font-size:1.4em;color:#606}.link-host{font-size:1.1em;color:purple}ul.groupview,ul.groupview ul{list-style-type:none}.munin-icon{background:url(../img/logo-munin.png) left top;margin-top:-6px;width:35px;height:35px;display:block;float:left}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px;border-radius:0 6px 6px 
6px}.dropdown-submenu:hover>.dropdown-menu{display:block}.dropdown-submenu>a:after{display:block;content:" ";float:right;width:0;height:0;border:5px solid transparent;border-right-width:0;border-left-color:#ccc;margin-top:5px;margin-right:-10px}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown-submenu.pull-left{float:none}.dropdown-submenu.pull-left>.dropdown-menu{left:-100%;margin-left:10px;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}img{box-sizing:content-box;border:2px solid transparent}img.warn{border:2px solid #8a6d3b}img.crit{border:2px solid #a94442}img.unkn{border:2px solid #fa0}.text-critical{color:#a94442}.text-critical:hover{color:#843534} \ No newline at end of file diff --git a/templates/munstrap/templates/munin-overview.tmpl b/templates/munstrap/templates/munin-overview.tmpl index 8449022d..0da303c0 100644 --- a/templates/munstrap/templates/munin-overview.tmpl +++ b/templates/munstrap/templates/munin-overview.tmpl @@ -1,4 +1,7 @@ + @@ -50,7 +53,7 @@
  • class="last"> "> - + [ //comparison-day.html">day //comparison-week.html">week //comparison-month.html">month diff --git a/templates/munstrap/templates/partial/head.tmpl b/templates/munstrap/templates/partial/head.tmpl index 0cefcf46..46db4e43 100644 --- a/templates/munstrap/templates/partial/head.tmpl +++ b/templates/munstrap/templates/partial/head.tmpl @@ -1,7 +1,9 @@ - Munin <TMPL_LOOP NAME="PATH"><TMPL_IF NAME="pathname"> :: <TMPL_VAR ESCAPE="HTML" NAME="pathname"><TMPL_ELSE>Munin</TMPL_IF></TMPL_LOOP> + + <TMPL_IF NAME="NAME"><TMPL_VAR ESCAPE="HTML" NAME="NAME"> (</TMPL_IF>Munin <TMPL_LOOP NAME="PATH"><TMPL_IF NAME="pathname"> :: <TMPL_VAR ESCAPE="HTML" NAME="pathname"></TMPL_IF></TMPL_LOOP><TMPL_IF NAME="NAME">)</TMPL_IF> + diff --git a/templates/official/partial/head.tmpl b/templates/official/partial/head.tmpl index 1a7c2342..ae4e9b54 100644 --- a/templates/official/partial/head.tmpl +++ b/templates/official/partial/head.tmpl @@ -12,7 +12,9 @@ - <TMPL_LOOP NAME="PATH"><TMPL_IF NAME="pathname"> :: <TMPL_VAR ESCAPE="HTML" NAME="pathname"><TMPL_ELSE>Munin</TMPL_IF></TMPL_LOOP> + + <TMPL_IF NAME="NAME"><TMPL_VAR ESCAPE="HTML" NAME="NAME"> (</TMPL_IF>Munin <TMPL_LOOP NAME="PATH"><TMPL_IF NAME="pathname"> :: <TMPL_VAR ESCAPE="HTML" NAME="pathname"></TMPL_LOOP><TMPL_IF NAME="NAME">)</TMPL_IF> + /static/favicon.ico" />