Mirror of https://github.com/munin-monitoring/contrib.git

Whitespace cleanup

* remove trailing whitespace
* remove empty lines at the end of files
Lars Kruse 2018-08-02 02:03:42 +02:00
parent ef851f0c34
commit 17f784270a
604 changed files with 2927 additions and 2945 deletions


@@ -46,13 +46,13 @@ if (exists $ARGV[0] and $ARGV[0] eq "autoconf") {
}
my $ua = LWP::UserAgent->new(timeout => 30);
my @badports;
foreach my $port (@PORTS) {
my $url = sprintf $URL, $port;
my $response = $ua->request(HTTP::Request->new('GET',$url));
push @badports, $port unless $response->is_success and $response->content =~ /Scoreboard/im;
}
if (@badports) {
print "no (no apache server-status on ports @badports)\n";
exit 1;
@@ -75,7 +75,7 @@ if (exists $ARGV[0] and $ARGV[0] eq "config") {
print $val, "\n";
print "activity_${port}_${char}.type GAUGE\n";
}
}
}
exit 0;
}


@@ -3,7 +3,7 @@
# Raphaël Droz <raphael.droz@gmail.com> - 2016-01-08
#
# Monitors the average time requests matching a custom regexp takes
-# For instance monitor time execution of files in http://example.com/foo/bar,
+# For instance monitor time execution of files in http://example.com/foo/bar,
# requests from google, images etc.
#
# Simply add an entry in the 'type' hashref and modify the description fields
@@ -13,7 +13,7 @@
#
# NOTE: You need to add a field in your Apache logs showing time executed.
# This is normally done using the %T (seconds) or %D (microseconds)
-# For instance:
+# For instance:
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %T %v"
# Check http://httpd.apache.org/docs/2.2/mod/mod_log_config.html#formats for more info
#
@@ -62,14 +62,14 @@ my $types = {
# any kind of request
total => {
munin_fields => {
-label => 'All requests',
+label => 'All requests',
draw => 'LINE2',
info => 'Average seconds per any request',
},
sum => 0,
lines => 0,
-matches => sub {
-return 1;
+matches => sub {
+return 1;
},
},
@@ -82,11 +82,11 @@ my $types = {
},
sum => 0,
lines => 0,
-matches => sub {
-my ($fields) = @_;
-my $script;
-($script = $fields->[6]) =~ s/\?.*\z //mx;
-return $script =~ m{ \.(png|jpe?g|gif|tiff|ilbm|tga) \z }mx;
+matches => sub {
+my ($fields) = @_;
+my $script;
+($script = $fields->[6]) =~ s/\?.*\z //mx;
+return $script =~ m{ \.(png|jpe?g|gif|tiff|ilbm|tga) \z }mx;
},
},
};
@@ -101,7 +101,7 @@ if (@ARGV && $ARGV[0] eq 'config') {
}
}
exit(0);
-}
+}
my $config_file = `ls -1 $ACCESS_LOG_PATTERN | tail -n 1`;
@@ -121,13 +121,13 @@ foreach my $line (@lines) {
$types->{$type}->{'lines'}++;
}
}
}
}
}
foreach my $type (keys %{$types}) {
my $value = $types->{$type}->{'lines'} ? $types->{$type}->{'sum'} / $types->{$type}->{'lines'} : 'U';
printf "%s.value %s\n", ($type, $value);
}
}

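The plugin above drives everything from its 'types' hashref: each entry carries the munin config fields, a 'matches' sub that filters log lines, and running 'sum'/'lines' counters whose ratio becomes the reported value. A minimal standalone sketch of that accumulation loop (the field positions and the /foo/bar filter are illustrative assumptions, not the plugin's defaults):

    #!/usr/bin/perl
    # Sketch only: average the logged duration of requests matching one regexp.
    # Assumes the request path sits in column 7 and that %T/%D was appended as
    # the last field of the LogFormat; adjust both to your own log layout.
    use strict;
    use warnings;

    my ($sum, $lines) = (0, 0);
    while (my $line = <STDIN>) {
        my @fields   = split /\s+/, $line;
        my $request  = $fields[6]  // '';
        my $duration = $fields[-1] // '';
        next unless $duration =~ /^\d+(\.\d+)?$/;      # skip unparsable lines
        next unless $request  =~ m{^/foo/bar/};        # the custom regexp
        $sum += $duration;
        $lines++;
    }
    # Munin convention: report 'U' (unknown) when nothing matched.
    printf "example.value %s\n", $lines ? $sum / $lines : 'U';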

@@ -5,10 +5,10 @@
# luis peralta - luis@11870.com
# http://www.ziritione.org
#
-# Installing: configure apache blackbox and set the logfile to /var/log/blackbox.log
+# Installing: configure apache blackbox and set the logfile to /var/log/blackbox.log
# or change the BLACKBOXLOG setting below.
#
-# Dependencies: apache mod_logio, apache blackbox
+# Dependencies: apache mod_logio, apache blackbox
# http://www.devco.net/archives/2008/03/05/detailed_apache_stats.php
#
# Last version available at: http://www.ziritione.org/http_status
@@ -23,13 +23,13 @@
use strict;
-my $BLACKBOXLOG = "/var/log/blackbox.log";
+my $BLACKBOXLOG = "/var/log/blackbox.log";
-my %WANTED = ( "apache.status.200" => "_200",
-"apache.status.301" => "_301",
-"apache.status.302" => "_302",
-"apache.status.404" => "_404",
-"apache.status.5xx" => "_5xx",
+my %WANTED = ( "apache.status.200" => "_200",
+"apache.status.301" => "_301",
+"apache.status.302" => "_302",
+"apache.status.404" => "_404",
+"apache.status.5xx" => "_5xx",
);
my $arg = shift();
@@ -78,7 +78,7 @@ graph_total total\n");
);
$num++;
}
}


@@ -2,15 +2,15 @@
Those plugins are used to monitor different projects or vhost (i.e. either different log files or using regular expression as filters) on the same web server.
## munin_byprojects_access
-Count the number of hits per projects/vhost.
+Count the number of hits per projects/vhost.
![byproject_access](https://www.mantor.org/~northox/misc/munin-plugins/nginx_byprojects_access1-month.png "byproject_access")
## munin_byprojects_bandwidth
-Count the total bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required.
+Count the total bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required.
![byproject_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_bandwidth-month.png "byproject_bandwidth")
## munin_byprojects_inout_bandwidth
-Counts the in/out bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required.
+Counts the in/out bandwidth used by each projects/vhost. [Logtail] (https://www.fourmilab.ch/webtools/logtail/) is required.
![byproject_inout_bandwidth](https://www.mantor.org/~northox/misc/munin-plugins/apache_byprojects_inout_bandwidth-month.png "byproject_inout_bandwidth")
## Installation


@@ -3,16 +3,16 @@ use strict;
#
# byprojects_access
#
-# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files
+# Perl script to monitor access *byprojects* (e.g. vhost) from multiple files
# and/or regex.
#
-# Danny Fullerton <northox@mantor.org>
+# Danny Fullerton <northox@mantor.org>
# Mantor Organization <www.mantor.org>
# This work is licensed under a MIT license.
#
# You need logtail (https://www.fourmilab.ch/webtools/logtail/)
#
-# Log can be gathered from multiple sources by simply specifying multiple log
+# Log can be gathered from multiple sources by simply specifying multiple log
# filename or using wildcards (glob). File content can be selected using regex.
#
# - 'prod' => [ {'path' => '/home/prod/log/access.log'} ],
@@ -74,7 +74,7 @@ foreach my $project ( keys %logs ) {
my @paths = glob $log->{'path'};
foreach my $path (@paths) {
my $state = $statepath.'/'.$project.$x.'_access.state';
-open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
+open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
die "Can't open $logtail : $!";
while (<LT>) {
my $buf = $_;

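The open() line above is the heart of all three byprojects plugins: logtail -f <logfile> -o <statefile> prints only the lines appended since the previous run and then records the new offset in the state file, so every munin poll counts fresh entries only. A minimal sketch of that pattern (paths and the filter regex are placeholders, not the plugin's configuration):

    #!/usr/bin/perl
    # Sketch only: count new matching lines since the last run via logtail.
    use strict;
    use warnings;

    my $logtail = '/usr/local/bin/logtail';                           # assumed location
    my $log     = '/var/log/apache2/access.log';                      # example log
    my $state   = '/var/lib/munin/plugin-state/example_access.state'; # example state file

    my $hits = 0;
    open(my $lt, '-|', $logtail, '-f', $log, '-o', $state)
        or die "Can't open $logtail: $!";
    while (my $line = <$lt>) {
        $hits++ if $line =~ m{"GET /};    # example per-project filter regex
    }
    close $lt;
    print "example.value $hits\n";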

@@ -3,10 +3,10 @@ use strict;
#
# byprojects_bandwidth
#
-# Perl script to monitor total bandwidth *byprojects* (e.g. vhost) from multiple
+# Perl script to monitor total bandwidth *byprojects* (e.g. vhost) from multiple
# files and/or regex.
#
-# Danny Fullerton <northox@mantor.org>
+# Danny Fullerton <northox@mantor.org>
# Mantor Organization <www.mantor.org>
# This work is licensed under a MIT license.
#
@@ -17,7 +17,7 @@ use strict;
# "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O"
# where %I is input and %O is output.
#
-# Log can be gathered from multiple sources by simply specifying multiple log
+# Log can be gathered from multiple sources by simply specifying multiple log
# filename or using wildcards (glob). File content can be selected using regex.
#
# - 'prod' => [ {'path' => '/home/prod/log/access.log'} ],
@@ -81,7 +81,7 @@ foreach my $project ( keys %logs ) {
my @paths = glob $log->{'path'};
foreach my $path (@paths) {
my $state = $statepath.'/'.$project.$x.'_totalbandwidth.state';
-open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
+open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
die "Can't open $logtail : $!";
while (<LT>) {
my $buf = $_;


@@ -3,10 +3,10 @@ use strict;
#
# byprojects_inout_bandwidth
#
-# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from
+# Perl script to monitor in/out bandwidth *byprojects* (e.g. vhost) from
# multiple files and/or regex.
#
-# Danny Fullerton <northox@mantor.org>
+# Danny Fullerton <northox@mantor.org>
# Mantor Organization <www.mantor.org>
# This work is licensed under a MIT license.
#
@@ -17,7 +17,7 @@ use strict;
# "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O"
# where %I is input and %O is output.
#
-# Log can be gathered from multiple sources by simply specifying multiple log
+# Log can be gathered from multiple sources by simply specifying multiple log
# filename or using wildcards (glob). File content can be selected using regex.
#
# - 'prod' => [ {'path' => '/home/prod/log/access.log'} ],
@@ -84,7 +84,7 @@ foreach my $project ( keys %logs ) {
my @paths = glob $log->{'path'};
foreach my $path (@paths) {
my $state = $statepath.'/'.$project.$x.'_inoutbandwidth.state';
-open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
+open(LT, "$logtail -f ".$log->{'path'}." -o $state |") or
die "Can't open $logtail : $!";
while (<LT>) {
my $buf = $_;


@@ -36,7 +36,7 @@ PROCS=$binname
if [ "$1" = "autoconf" ]; then
-echo yes
+echo yes
exit 0
fi
@@ -65,5 +65,5 @@ VAL2=`ps auxf | grep ${PROCS} | grep ^${USR} | grep -v grep | awk '{s+=$6} END {
VAL3=`expr $VAL2 / $VAL1`
echo "servers.value $VAL3"


@@ -34,7 +34,7 @@ USR=$apuser
PROCS=$binname
if [ "$1" = "autoconf" ]; then
-echo yes
+echo yes
exit 0
fi


@@ -8,7 +8,7 @@
# to Apache2::SizeLimit.
# Author: Kjetil Kjernsmo <kjetilk@opera.com>, based on work by William Viker
-# Copyright (C) 2007 Opera Software ASA
+# Copyright (C) 2007 Opera Software ASA
#
# Contibutors: Earle Nietzel <earle.nietzel@gmail.com>
#


@@ -144,7 +144,7 @@ if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) {
if ($response->content =~ /^Total Accesses:/im ) {
next;
} else {
print "no (ExtendedStatus option for apache"
print "no (ExtendedStatus option for apache"
. " mod_status is missing on port $port)\n";
exit 0;
}


@@ -32,7 +32,7 @@ GPLv2
if [ "$1" = "autoconf" ]; then
-echo yes
+echo yes
exit 0
fi
@@ -81,8 +81,3 @@ done
echo "threads.value `echo $((SUM / $COUNT))`"


@@ -35,7 +35,7 @@ PROCS=$binname
if [ "$1" = "autoconf" ]; then
-echo yes
+echo yes
exit 0
fi
@@ -60,5 +60,5 @@ VAL1=`ps auxf | grep ${PROCS} | grep -v grep | awk '{s+=$6} END {print s}'`
VAL2=`expr $VAL1 / 1024`
echo "servers.value $VAL2"


@@ -100,7 +100,7 @@ do
else
USERNAME[$I]="anonymous"
fi
-NAME[$I]="_${USERNAME[I]}" # Output sort order
+NAME[$I]="_${USERNAME[I]}" # Output sort order
else
NAME[$I]="${USERNAME[I]}"
fi
@@ -140,7 +140,7 @@
FILENAMES=$( find $DIRECTORY -type f -not -wholename $TIMESTAMP | sort)
awk '{ printf "%s.label %s\n%s.draw AREA\n", $1, $3, $1 }' $( echo "$FILENAMES" | head -n1 )
for FILENAME in $( echo "$FILENAMES" | tail -n+2)
do
awk '{ printf "%s.label %s\n%s.draw STACK\n", $1, $3, $1 }' $FILENAME


@@ -95,7 +95,7 @@ sub count {
#find sitename
$file=~s/$site/$1/;
$file=$vhost if $vhost;
# skip broken lines
next unless $file;
@@ -103,7 +103,7 @@ sub count {
my $vpm=clean_fieldname("$file");
$temp{$vpm}{'label'}="$file";
$temp{$vpm}{'label'}=~s/www\.//;
# count all requests
$temp{$vpm}{'requests'}++;
@@ -118,13 +118,13 @@ sub count {
# average bytes
$temp{$vpm}{'avg_bytes'}=$temp{$vpm}{'bytes'}/$temp{$vpm}{'requests'} || 0;
}
# count by status / error code
$temp{$vpm}{"status"}{$status}++ if $status;
if ($time) {
# microsec to millisec
-$time=sprintf("%d",$time/1000);
+$time=sprintf("%d",$time/1000);
# min/max execution time
$temp{$vpm}{'max_time'}=max($temp{$vpm}{'max_time'},$time) || 0;
@@ -144,9 +144,9 @@ while (1) {
# tail files, calls &count with linearray
$tail->read;
-# begin transaction
+# begin transaction
$share->lock(LOCK_EX);
# get data (may be updated by other loggers too)
my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run
@@ -182,7 +182,7 @@ while (1) {
$share->store( freeze \%old );
# end transaction
$share->unlock;
# parse/write every n seconds (plus processing time)
sleep $nsec;
}

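The hunk above shows the logger's shared-memory transaction: take an exclusive lock, thaw the shared counters, merge this interval's data, freeze and store the result, then unlock. A condensed sketch of that cycle, assuming IPC::ShareLite as the shared-memory layer (only the lock/fetch/store/unlock and freeze/thaw calls are visible in the diff, so the module choice here is an assumption):

    #!/usr/bin/perl
    # Sketch only: the lock / thaw / merge / freeze / store cycle.
    use strict;
    use warnings;
    use Fcntl ':flock';
    use IPC::ShareLite;                       # assumed shared-memory module
    use Storable qw(freeze thaw);

    my $share = IPC::ShareLite->new(-key => 'apvh', -create => 'yes', -destroy => 'no')
        or die "Cannot attach shared memory: $!";

    my %new = (examplevhost => { requests => 3 });    # counters from this interval

    $share->lock(LOCK_EX);                            # begin transaction
    # eval tolerates the empty segment on the very first run
    my %old = eval { %{ thaw($share->fetch) } };
    for my $site (keys %new) {
        $old{$site}{requests} = ($old{$site}{requests} // 0) + $new{$site}{requests};
    }
    $share->store(freeze \%old);
    $share->unlock;                                   # end transaction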

@@ -40,12 +40,12 @@ while (<STDIN>) {
# sanity check
next unless m/^([\d\w\.\-_]+\s){5}([\d\w\.\-_]+$)/; # escaped "." and "-"
# sitename to munin fieldname
my $vpm=clean_fieldname($vhost);
$temp{$vpm}{'label'}=$vhost;
$temp{$vpm}{'label'}=~s/www\.//;
# count all requests
$temp{$vpm}{'requests'}++;
@@ -60,7 +60,7 @@ while (<STDIN>) {
# average bytes
$temp{$vpm}{'avg_bytes'}=$temp{$vpm}{'bytes'}/$temp{$vpm}{'requests'} || 0 if ($bytes);
}
# count by status / error code
$temp{$vpm}{"status"}{$status}++ if $status;
@@ -77,9 +77,9 @@ while (<STDIN>) {
};
sub periodic_write {
-# begin transaction
+# begin transaction
$share->lock(LOCK_EX);
# get data (may be updated by other loggers too)
my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run


@@ -18,13 +18,13 @@ This plugin requires data from apache. You can get at the data in two ways:
- slightly less performant, but easier to apply to existing installations
- If you want response time stats, you have to log them in apache:
<IfModule mod_log_config.c>
-LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined-time
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined-time
</IfModule>
- Configure the log parser to match your installation regarding naming and log folders
-You can use both solutions simultaneously, the data will be merged.
+You can use both solutions simultaneously, the data will be merged.
Be aware that a apache log CustomLog directive in the master config will only log those vhosts that have no directive of their own.
Install plugin conf (after [apache_*])
[apache_vhosts]
@@ -36,14 +36,14 @@ env.checks requests bytes time
# subgraphs - create multigraph subgraphs (watch your graphing performance...), default 0
# checks - enable stats on bytes and response times per request, you have to log these in apache
-A word on performance:
+A word on performance:
Requests/sec should not be much of a problem. Pipelogger and Logparser should not have man performance problems, as the apply one regex per line and add some stats.
-Stats are saved every n seconds (default: 7) to shared mem in serialized format. That should be ok on the most loaded servers (unless you watch cache logs).
-I would estimate that > 10k log lines/sec could start becoming a problem, you might have to start tuning there or use a dedicated system.
+Stats are saved every n seconds (default: 7) to shared mem in serialized format. That should be ok on the most loaded servers (unless you watch cache logs).
+I would estimate that > 10k log lines/sec could start becoming a problem, you might have to start tuning there or use a dedicated system.
You might think about splitting the logs over multiple Logparser scripts to parallelize and merge in larger intervals.
-Graphing is another matter, the more vhosts you have.
-With subgraphs off, you do 3 main graphs * 4 timescales (day, week, month, year).
+Graphing is another matter, the more vhosts you have.
+With subgraphs off, you do 3 main graphs * 4 timescales (day, week, month, year).
With subgraphs on, you get 2 checks * (1 + 6 * #vhosts) + 1 check * (1 + #vhosts * #statuscodes * 4)
With hundreds of vhosts that becomes a problem, as munin-update and munin-html do not scale well.
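A rough worked example of that scaling (numbers are illustrative, not from the README): with subgraphs on, 100 vhosts and about 6 distinct status codes give roughly 2 * (1 + 6*100) + 1 * (1 + 100*6*4) = 3,603 graph series, versus just 3 main graphs * 4 timescales = 12 graphs with subgraphs off.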
@@ -152,7 +152,7 @@ ${site}_${graph}_$check.type GAUGE
END
} # end graph
} # end sites
-} # end subgraph
+} # end subgraph
} # end checks
@@ -173,7 +173,7 @@ graph_period minute
graph_order $order
END
foreach my $site (keys %data) {
print <<END;
${site}_requests.label $data{$site}{'label'}
${site}_requests.info $site
@@ -230,7 +230,7 @@ foreach my $check (keys %checks) {
} # end sites
if ($subgraphs{$check}) {
-# subgraph values
+# subgraph values
foreach my $site (keys %data) {
print "\nmultigraph apache_vhosts_$check.$site\n";
foreach my $graph ("avg","max") {


@@ -43,13 +43,13 @@ if (exists $ARGV[0] and $ARGV[0] eq "autoconf") {
}
my $ua = LWP::UserAgent->new (timeout => 30);
my @badports;
foreach my $port (@PORTS) {
my $url = sprintf $URL, $port;
my $response = $ua->request (HTTP::Request->new('GET', $url));
push @badports, $port unless $response->is_success;
}
if (@badports) {
print "no (no mod_watch exists on ports @badports)\n";
exit 1;
@@ -76,7 +76,7 @@ foreach my $port (@PORTS) {
my ($server, undef, $ifInOctets, $ifOutOctets, $ifRequests,
$ifDocuments) = split (/\s/, $string, 6);
push @servers, $server unless $server eq "SERVER";
-push @data, "$server $ifInOctets $ifOutOctets $ifRequests $ifDocuments"
+push @data, "$server $ifInOctets $ifOutOctets $ifRequests $ifDocuments"
unless $server eq "SERVER";
}
}


@@ -22,38 +22,38 @@
#
# apache2.conf
# LogFormat "%h %l %u %t \"%r\" %>s %O %b %D \"%{Referer}i\" \"%{User-Agent}i\"" custom
-#
+#
# According to : http://httpd.apache.org/docs/2.2/mod/mod_log_config.html
# %D The time taken to serve the request, in microseconds.
# In our case %D -> 9
######################################################################################################
######################################################################################################
# GLOBALS
LOGFILE="/var/log/apache2/access.log"
BUFFER_SIZE=500
######################################################################################################
do_ () {
command="tail -n $BUFFER_SIZE $LOGFILE | awk '{sum=sum+\$9} END {print \"exec_time.value \"(sum/$BUFFER_SIZE)/1000000}'"
eval $command
exit 0
}
do_config () {
echo "graph_title Average page execution time"
echo "graph_vlabel Seconds"
echo "graph_category webserver"
echo "graph_args --base 1000 -l 0"
echo "graph_info Average page execution time"
echo "exec_time.label Execution time"
echo "exec_time.type GAUGE"
}
case $1 in
config|'')
eval do_$1
esac
exit $?