New/updated plugins
* moved netstat_bsd_ (collecting netstat -m stats) to netstat_bsd_m_
* added netstat_bsd_s_, collecting netstat -s stats (and some example graph images)
* updated the ejabberd_scanlog plugin (fixed an error message, added a couple of new log types)
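For context, both netstat plugins are munin wildcard plugins: the part of the symlink name after the plugin prefix selects what gets collected. Below is a minimal sketch (not part of the diff) of how the new netstat_bsd_s_ derives the protocol from its file name; the symlink name `netstat_bsd_s_udp` is just a hypothetical example.

```ruby
# Sketch only: mirrors how netstat_bsd_s_ picks the protocol from its own
# file name; 'netstat_bsd_s_udp' stands in for File.basename($0, '.*').
PLUGIN_NAME = 'netstat_bsd_s_'
proto_name = 'netstat_bsd_s_udp'
proto_name = proto_name[PLUGIN_NAME.size..-1] if proto_name.start_with?(PLUGIN_NAME)
proto_name = 'tcp' if proto_name.empty?   # a bare netstat_bsd_s_ defaults to tcp
puts proto_name                           # => "udp", so the plugin runs `netstat -sp udp`
```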
ejabberd_scanlog (updated):

@@ -1,7 +1,7 @@
 #!/usr/bin/env ruby
 require 'yaml'

-# ejabberd_scanlog revision 1 (Feb 2012)
+# ejabberd_scanlog revision 2 (Mar 2012)
 #
 # Scans ejabberd 2.1.x log for known error signatures and counts them
 #
@@ -13,6 +13,11 @@ require 'yaml'
 # env.log: ejabberd log file (defaults to /var...)
 #
 # Author: Artem Sheremet <dot.doom@gmail.com>

+#
+# Run with 'debug' argument to initiate full log rescan.
+# This will also print out unparsed log entries to stderr.
+# Cache file will be untouched.
+#

 LOG_FILE = ENV['log'] || '/var/log/ejabberd/ejabberd.log'
@@ -20,17 +25,21 @@ CACHE_FILE = '/tmp/ejabberd_scanlog_cache' # cache file position

 DEFAULT_CACHE = { :start => 0 }

-debug_mode = ARGV.first == 'debug'
+$debug_mode = ARGV.first == 'debug'

-begin
-    log_info = YAML.load IO.read(CACHE_FILE)
-rescue
+if $debug_mode
     log_info = DEFAULT_CACHE
-end
+else
+    begin
+        log_info = YAML.load IO.read(CACHE_FILE)
+    rescue
+        log_info = DEFAULT_CACHE
+    end

-if File.size(LOG_FILE) < log_info[:start]
-    # logrotate?
-    log_info = DEFAULT_CACHE
+    if File.size(LOG_FILE) < log_info[:start]
+        # logrotate?
+        log_info = DEFAULT_CACHE
+    end
 end

 new_data = ''
@@ -53,9 +62,12 @@ LABELS = {
     :sql_transactions_exceeded => 'SQL transaction restarts exceeded',
     :unexpected_info => 'Unexpected info',
     :other_sql_cmd_timeout => 'Other sql_cmd timeout',
+    :system_ports_limit => 'System limit hit: ports', # check with length(erlang:ports())., set in ejabberdctl config file
+    :system_limit => 'Other system limit hit', # processes? check with erlang:system_info(process_count)., erlang:system_info(process_limit)., set in ejabberdctl cfg
+    :generic_server_terminating => 'Generic server terminating',
     :UNKNOWN => 'Unknown error/warning'
 }
-def log_type(text, debug_mode)
+def log_type(text)
     if text.include? 'ejabberd_odbc_sup'
         :ejabberd_odbc_failure
     elsif text.include? "mod_pubsub_odbc,'-unsubscribe"
@@ -86,8 +98,14 @@ def log_type(text, debug_mode)
         :unexpected_info
     elsif text.include?('timeout') and text.include?('sql_cmd')
         :other_sql_cmd_timeout
+    elsif text.include?('system_limit') and text.include?('open_port')
+        :system_ports_limit
+    elsif text.include?('system_limit')
+        :system_limit
+    elsif text.include?('Generic server') and text.include?('terminating')
+        :generic_server_terminating
     else
-        puts "Cannot parse text: #{text}" if debug_mode
+        warn "Cannot parse text: #{text}" if $debug_mode
         :UNKNOWN
     end
 end
@@ -99,18 +117,18 @@ new_data.split("\n=").each { |report|
     next unless type and time and text

     log_info[type] = (log_info[type] || 0) + 1
-    if sub_type = log_type(text, debug_mode)
+    if sub_type = log_type(text)
         log_info[sub_type] = (log_info[sub_type] || 0) + 1
     end
 }

 log_info[:start] += new_data.size
-File.open(CACHE_FILE, 'w') { |f| f.write log_info.to_yaml }
+File.open(CACHE_FILE, 'w') { |f| f.write log_info.to_yaml } unless $debug_mode

 if ARGV.first == 'config'
     puts <<CONFIG
 graph_title Ejabberd Log
-graph_vtitle Report count
+graph_vtitle per period
 graph_category ejabberd
 graph_args -l 0
 graph_order #{(LABELS.keys + log_info.keys.select { |k| k.is_a? String }.sort).join(' ')}
@@ -128,6 +146,7 @@ LABELS.each_pair { |type,label|
             else
                 'STACK'
             end
+            'LINE'
         }"
     else
         puts "#{type}.value #{log_info[type] or 0}"
@@ -139,7 +158,7 @@ log_info.each_pair { |k,value|
     if k.is_a? String
         if ARGV.first == 'config'
             puts "#{k}.label #{k}"
-            puts "#{k}.draw LINE2"
+            puts "#{k}.draw LINE"
         else
             puts "#{k}.value #{value}"
         end
[example graph image updated: 36 KiB before, 38 KiB after]
netstat_bsd_ renamed to netstat_bsd_m_ (updated):

@@ -1,6 +1,6 @@
 #!/usr/bin/env ruby

-# netstat_bsd revision 1 (Feb 2012)
+# netstat_bsd_m revision 1 (Feb 2012)
 #
 # This plugin shows various statistics from 'netstat -m'
 #
@@ -17,7 +17,7 @@
 #%# capabilities=autoconf suggest

 # original filename
-PLUGIN_NAME = 'netstat_bsd_'
+PLUGIN_NAME = 'netstat_bsd_m_'

 class String
     def escape
@@ -31,7 +31,7 @@ class String
     end
 end

-def netstat(filter = nil)
+def netstat_m(filter = nil)
     Hash[`netstat -m`.split($/).map { |line|
         if line =~ /^([\d\/K]+) (.*) \(([\w\/+]+)\)$/
             # 7891K/22385K/30276K bytes allocated to network (current/cache/total)
@@ -52,9 +52,9 @@ case ARGV.first
 when 'autoconf'
     puts `uname -s`.include?('FreeBSD') ? 'yes' : 'no'
 when 'suggest'
-    puts netstat.keys.map(&:escape).join $/
+    puts netstat_m.keys.map(&:escape).join $/
 when 'config'
-    data = netstat(stat_name)
+    data = netstat_m(stat_name)
     if data.empty?
         warn "no data for <#{stat_name}>. Try running with 'suggest'"
     else
@@ -84,7 +84,7 @@ CONFIG
         }.join $/
     end
 when nil # fetch
-    data = netstat(stat_name)
+    data = netstat_m(stat_name)
     unless data.empty?
         puts data.first.last.map { |name, value|
             value = value.to_i * 1024 if value.end_with? 'K'
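A side note on the parsing above: the regexp in netstat_m splits each `netstat -m` summary line into the value part, the description, and the per-value names. A small sketch, reusing the example line quoted in the plugin's own comment:

```ruby
# Sketch only: how the netstat_m regexp decomposes a `netstat -m` line.
line = '7891K/22385K/30276K bytes allocated to network (current/cache/total)'
if line =~ /^([\d\/K]+) (.*) \(([\w\/+]+)\)$/
    values, description, names = $1, $2, $3
    p values.split('/')   # => ["7891K", "22385K", "30276K"]
    p description         # => "bytes allocated to network"
    p names.split('/')    # => ["current", "cache", "total"]
end
```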
[example graph image updated: 19 KiB before, 19 KiB after]
plugins/network/netstat_bsd_s_/netstat_bsd_s_ (new executable file, 331 lines):

@@ -0,0 +1,331 @@
#!/usr/bin/env ruby

# netstat_bsd_s revision 1 (Mar 2012)
#
# This plugin shows various statistics from 'netstat -s'
#
# Required privileges: none
#
# OS:
#   Supposed: BSD
#   Tested:   FreeBSD 8.2
#
# Author: Artem Sheremet <dot.doom@gmail.com>
#

#%# family=auto
#%# capabilities=autoconf suggest

# original filename
PLUGIN_NAME = 'netstat_bsd_s_'

$debug_mode = ARGV.first == 'debug'

class String
    def escape
        self.gsub /[^\w]/, '_'
    end

    unless method_defined? :start_with?
        def start_with?(str)
            self[0...str.size] == str
        end
    end
end

class Graph
    def initialize(name, protocol, parse_expr)
        @name, @protocol, @parse_expr = name, protocol, parse_expr
    end

    def config
        config_options = []

        # first, build a list of multigraphs (one graph per unit)
        # Hash key is unit, and the value is array of labels
        multigraphs = {}
        @parse_expr.each { |expr, descr|
            descr.each { |entry|
                labels_array = (multigraphs[entry.first] ||= [])
                labels_array.push entry.last
            }
        }

        multigraphs.each_pair { |unit, labels|
            # now just add options to the config

            config_options += [
                "multigraph #{name(unit)}",
                "graph_title Netstat: #{@protocol}: #{@name}#{" (#{unit})" if multigraphs.size > 1}",
                "graph_category netstat",
                "graph_vtitle per second",
                "graph_order #{labels.map(&:escape).join(' ')}"
            ]

            config_options.push "graph_args --base 1024" if unit == 'bytes'

            labels.each { |label|
                label_esc = label.escape
                config_options += [
                    "#{label_esc}.type DERIVE",
                    "#{label_esc}.min 0",
                    "#{label_esc}.draw LINE",
                    "#{label_esc}.label #{label}"
                ]
            }
        }

        config_options
    end

    def fetch(data)
        output_data = []

        # first build a set of multigraphs, one per unit.
        # Hash key is unit, and the value is a hash of 'escaped label' => 'value'
        multigraphs = {}
        @parse_expr.each { |expr, descr|
            index = data.index { |line| line =~ expr }
            if index
                data.delete_at index
                $~[1..-1].zip(descr).each { |value, info|
                    unit, label = info
                    (multigraphs[unit] ||= {})[label.escape] = value
                }
            else
                warn "no line found for #{expr}, #{descr}" if $debug_mode
            end
        }

        multigraphs.each_pair { |unit, values|
            output_data.push "multigraph #{name(unit)}"
            output_data += values.map { |label, value| "#{label}.value #{value}" }
        }

        output_data
    end

    def name(unit)
        "#{PLUGIN_NAME}#{@protocol}_#{@name.escape}_#{unit}"
    end
end

def graphs_for(protocol)
    case protocol
    # order of the graps in each section is important for parsing.
    # At the same time, it is not important for munin, so we are OK placing it in parsing order here.
    when 'tcp'
        [
            Graph.new('sent', protocol, [
                # for each match of the regexp, there is a matching entry in the array.
                # First element of an entry is unit name, and the second is label.
                # It could be reasonable to add third etc as warning and critical values.

                [ /(\d+) packets sent$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) data packets \((\d+) bytes\)$/, [ [ :packets, 'data' ], [ :bytes, 'data' ] ] ],
                [ /(\d+) data packets \((\d+) bytes\) retransmitted$/, [ [ :packets, 'retransmitted' ], [ :bytes, 'retransmitted' ] ] ],
                [ /(\d+) data packets unnecessarily retransmitted$/, [ [ :packets, 'unnecessarily retransmitted' ] ] ],
                [ /(\d+) resends initiated by MTU discovery$/, [ [ :packets, 'resends initiated by MTU discovery' ] ] ],
                [ /(\d+) ack-only packets \((\d+) delayed\)$/, [ [ :packets, 'ack-only' ], [ :packets, 'ack-only delayed' ] ] ],
                [ /(\d+) URG only packets$/, [ [ :packets, 'URG only' ] ] ],
                [ /(\d+) window probe packets$/, [ [ :packets, 'window probe' ] ] ],
                [ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ],
                [ /(\d+) control packets$/, [ [ :packets, 'control' ] ] ]
            ]),

            Graph.new('received', protocol, [
                [ /(\d+) packets received$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) acks \(for (\d+) bytes\)$/, [ [ :packets, 'acks' ], [ :bytes, 'acks' ] ] ],
                [ /(\d+) duplicate acks$/, [ [ :packets, 'duplicate acks' ] ] ],
                [ /(\d+) acks for unsent data$/, [ [ :packets, 'acks for unsent data' ] ] ],
                [ /(\d+) packets \((\d+) bytes\) received in-sequence$/, [ [ :packets, 'in-sequence' ], [ :bytes, 'in-sequence' ] ] ],
                [ /(\d+) completely duplicate packets \((\d+) bytes\)$/, [ [ :packets, 'completely duplicate' ], [ :bytes, 'completely duplicate' ] ] ],
                [ /(\d+) old duplicate packets$/, [ [ :packets, 'old duplicate' ] ] ],
                [ /(\d+) packets with some dup\. data \((\d+) bytes duped\)$/, [ [ :packets, 'some dup. data' ], [ :bytes, 'partial dups' ] ] ],
                [ /(\d+) out-of-order packets \((\d+) bytes\)$/, [ [ :packets, 'out-of-order' ], [ :bytes, 'out-of-order' ] ] ],
                [ /(\d+) packets \((\d+) bytes\) of data after window$/, [ [ :packets, 'data after window' ], [ :bytes, 'data after window' ] ] ],
                [ /(\d+) window probes$/, [ [ :packets, 'window probes' ] ] ],
                [ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ],
                [ /(\d+) packets received after close$/, [ [ :packets, 'after close' ] ] ],
                [ /(\d+) discarded for bad checksums$/, [ [ :packets, 'bad checksums' ] ] ],
                [ /(\d+) discarded for bad header offset field$/, [ [ :packets, 'bad header offset flds' ] ] ],
                [ /(\d+) discarded because packet too short$/, [ [ :packets, 'too short' ] ] ],
                [ /(\d+) discarded due to memory problems$/, [ [ :packets, 'discarded: memory problems' ] ] ],
                [ /(\d+) ignored RSTs in the windows$/, [ [ :packets, 'ignored RSTs in windows' ] ] ],
                [ /(\d+) segments updated rtt \(of (\d+) attempts\)$/, [ [ :packets, 'RTT: updated' ], [ :packets, 'RTT: attempts to update' ] ] ]
            ]),

            Graph.new('connections', protocol, [
                [ /(\d+) connection requests$/, [ [ :connections, 'requests' ] ] ],
                [ /(\d+) connection accepts$/, [ [ :connections, 'accepts' ] ] ],
                [ /(\d+) bad connection attempts$/, [ [ :connections, 'bad attempts' ] ] ],
                [ /(\d+) listen queue overflows$/, [ [ :connections, 'listen queue overflows' ] ] ],
                [ /(\d+) connections established \(including accepts\)$/, [ [ :connections, 'established' ] ] ],
                [ /(\d+) connections closed \(including (\d+) drops\)$/, [ [ :connections, 'closed' ], [ :connections, 'dropped' ] ] ],
                [ /(\d+) connections updated cached RTT on close$/, [ [ :connections, 'closed & upd cached RTT' ] ] ],
                [ /(\d+) connections updated cached RTT variance on close$/, [ [ :connections, 'closed & upd cached RTT variance' ] ] ],
                [ /(\d+) connections updated cached ssthresh on close$/, [ [ :connections, 'closed & upd cached ssthresh' ] ] ],
                [ /(\d+) embryonic connections dropped$/, [ [ :connections, 'embryonic dropped' ] ] ]
            ]),

            Graph.new('timeouts', protocol, [
                [ /(\d+) retransmit timeouts$/, [ [ :connections, 'retransmit' ] ] ],
                [ /(\d+) connections dropped by rexmit timeout$/, [ [ :connections, 'retransmit: dropped' ] ] ],
                [ /(\d+) persist timeouts$/, [ [ :connections, 'persist' ] ] ],
                [ /(\d+) connections dropped by persist timeout$/, [ [ :connections, 'persist: dropped' ] ] ],
                [ /(\d+) Connections \(fin_wait_2\) dropped because of timeout$/, [ [ :connections, 'fin_wait_2: dropped' ] ] ],
                [ /(\d+) keepalive timeouts$/, [ [ :connections, 'keepalive' ] ] ],
                [ /(\d+) keepalive probes sent$/, [ [ :connections, 'keepalive: probes sent' ] ] ],
                [ /(\d+) connections dropped by keepalive$/, [ [ :connections, 'keepalive: dropped' ] ] ]
            ]),

            Graph.new('correct predictions', protocol, [
                [ /(\d+) correct ACK header predictions$/, [ [ :predictions, 'ACK header' ] ] ],
                [ /(\d+) correct data packet header predictions$/, [ [ :predictions, 'data packet header' ] ] ]
            ]),

            Graph.new('SYN', protocol, [
                [ /(\d+) syncache entries added$/, [ [ :entries, 'cache added' ] ] ],
                [ /(\d+) cookies sent$/, [ [ :entries, 'cookies sent' ] ] ],
                [ /(\d+) cookies received$/, [ [ :entries, 'cookies received' ] ] ],
                [ /(\d+) retransmitted$/, [ [ :entries, 'retransmitted' ] ] ],
                [ /(\d+) dupsyn$/, [ [ :entries, 'duplicates' ] ] ],
                [ /(\d+) dropped$/, [ [ :entries, 'dropped' ] ] ],
                [ /(\d+) completed$/, [ [ :entries, 'completed' ] ] ],
                [ /(\d+) bucket overflow$/, [ [ :entries, 'bucket overflow' ] ] ],
                [ /(\d+) cache overflow$/, [ [ :entries, 'cache overflow' ] ] ],
                [ /(\d+) reset$/, [ [ :entries, 'reset' ] ] ],
                [ /(\d+) stale$/, [ [ :entries, 'stale' ] ] ],
                [ /(\d+) aborted$/, [ [ :entries, 'aborted' ] ] ],
                [ /(\d+) badack$/, [ [ :entries, 'bad ACK' ] ] ],
                [ /(\d+) unreach$/, [ [ :entries, 'unreachable' ] ] ],
                [ /(\d+) zone failures$/, [ [ :entries, 'zone failures' ] ] ],
            ]),

            Graph.new('SACK', protocol, [
                [ /(\d+) SACK recovery episodes$/, [ [ :packets, 'recovery episodes' ] ] ],
                [ /(\d+) segment rexmits in SACK recovery episodes$/, [ [ :packets, 'segment rexmits' ] ] ],
                [ /(\d+) byte rexmits in SACK recovery episodes$/, [ [ :bytes, 'bytes rexmitted' ] ] ],
                [ /(\d+) SACK options \(SACK blocks\) received$/, [ [ :packets, 'options blocks rcvd' ] ] ],
                [ /(\d+) SACK options \(SACK blocks\) sent$/, [ [ :packets, 'options blocks sent' ] ] ],
                [ /(\d+) SACK scoreboard overflow$/, [ [ :packets, 'scoreboard overflow' ] ] ]
            ]),

            Graph.new('ECN', protocol, [
                [ /(\d+) packets with ECN CE bit set$/, [ [ :packets, 'CE bit' ] ] ],
                [ /(\d+) packets with ECN ECT\(0\) bit set$/, [ [ :packets, 'ECT(0) bit' ] ] ],
                [ /(\d+) packets with ECN ECT\(1\) bit set$/, [ [ :packets, 'ECT(1) bit' ] ] ],
                [ /(\d+) successful ECN handshakes$/, [ [ :packets, 'successfull handshakes' ] ] ],
                [ /(\d+) times ECN reduced the congestion window$/, [ [ :packets, 'congestion window reduced' ] ] ]
            ])
        ]
    when 'udp'
        [
            Graph.new('received', protocol, [
                [ /(\d+) datagrams received$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) with incomplete header$/, [ [ :packets, 'incomplete header' ] ] ],
                [ /(\d+) with bad data length field$/, [ [ :packets, 'bad data length field' ] ] ],
                [ /(\d+) with bad checksum$/, [ [ :packets, 'bad checksum' ] ] ],
                [ /(\d+) with no checksum$/, [ [ :packets, 'no checksum' ] ] ],
                [ /(\d+) dropped due to no socket$/, [ [ :packets, 'dropped: no socket' ] ] ],
                [ /(\d+) broadcast\/multicast datagrams undelivered$/, [ [ :packets, '*cast undelivered' ] ] ],
                [ /(\d+) dropped due to full socket buffers$/, [ [ :packets, 'dropped: no buffers' ] ] ],
                [ /(\d+) not for hashed pcb$/, [ [ :packets, 'not for hashed pcb' ] ] ],
                [ /(\d+) delivered$/, [ [ :packets, 'delivered' ] ] ]
            ]),

            Graph.new('sent', protocol, [
                [ /(\d+) datagrams output$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) times multicast source filter matched$/, [ [ :packets, 'multicast src filter match' ] ] ]
            ])
        ]
    when 'ip'
        [
            Graph.new('received', protocol, [
                [ /(\d+) total packets received$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) bad header checksums$/, [ [ :packets, 'bad header checksum' ] ] ],
                [ /(\d+) with size smaller than minimum$/, [ [ :packets, 'size smaller than min' ] ] ],
                [ /(\d+) with data size < data length$/, [ [ :packets, 'data size < data length' ] ] ],
                [ /(\d+) with ip length > max ip packet size$/, [ [ :packets, 'ip length > max ip packet sz' ] ] ],
                [ /(\d+) with header length < data size$/, [ [ :packets, 'header length < data size' ] ] ],
                [ /(\d+) with data length < header length$/, [ [ :packets, 'data length < header length' ] ] ],
                [ /(\d+) with bad options$/, [ [ :packets, 'bad options' ] ] ],
                [ /(\d+) with incorrect version number$/, [ [ :packets, 'incorrect version' ] ] ],
                [ /(\d+) fragments received$/, [ [ :packets, 'fragments' ] ] ],
                [ /(\d+) fragments dropped \(dup or out of space\)$/, [ [ :packets, 'frags dropped: dup/out of spc' ] ] ],
                [ /(\d+) fragments dropped after timeout$/, [ [ :packets, 'frags dropped: timeout' ] ] ],
                [ /(\d+) packets reassembled ok$/, [ [ :packets, 'reassembled ok' ] ] ],
                [ /(\d+) packets for this host$/, [ [ :packets, 'for this host' ] ] ],
                [ /(\d+) packets for unknown\/unsupported protocol$/, [ [ :packets, 'for unknown/unsup protocol' ] ] ],
                [ /(\d+) packets forwarded \((\d+) packets fast forwarded\)$/, [ [ :packets, 'forwarded' ], [ :packets, 'fast forwarded' ] ] ],
                [ /(\d+) packets not forwardable$/, [ [ :packets, 'not forwardable' ] ] ],
                [ /(\d+) packets received for unknown multicast group$/, [ [ :packets, 'unknown multicast grp' ] ] ]
            ]),

            Graph.new('sent', protocol, [
                [ /(\d+) packets sent from this host$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) redirects sent$/, [ [ :packets, 'redirect' ] ] ],
                [ /(\d+) packets sent with fabricated ip header$/, [ [ :packets, 'fabricated IP head' ] ] ],
                [ /(\d+) output packets dropped due to no bufs, etc\.$/, [ [ :packets, 'dropped: no bufs, etc' ] ] ],
                [ /(\d+) output packets discarded due to no route$/, [ [ :packets, 'discarded: no route' ] ] ],
                [ /(\d+) output datagrams fragmented$/, [ [ :packets, 'fragmented' ] ] ],
                [ /(\d+) fragments created$/, [ [ :packets, 'fragments created' ] ] ],
                [ /(\d+) datagrams that can't be fragmented$/, [ [ :packets, "can't be fragmented" ] ] ],
                [ /(\d+) tunneling packets that can't find gif$/, [ [ :packets, 'tunneling, gif not found' ] ] ],
                [ /(\d+) datagrams with bad address in header$/, [ [ :packets, 'bad address in header' ] ] ]
            ])
        ]
    when 'arp'
        [
            Graph.new('sent', protocol, [
                [ /(\d+) ARP requests sent$/, [ [ :packets, 'requests' ] ] ],
                [ /(\d+) ARP replies sent$/, [ [ :packets, 'replies' ] ] ]
            ]),

            Graph.new('received', protocol, [
                [ /(\d+) ARP packets received$/, [ [ :packets, 'total' ] ] ],
                [ /(\d+) ARP requests received$/, [ [ :packets, 'requests' ] ] ],
                [ /(\d+) ARP replies received$/, [ [ :packets, 'replies' ] ] ],
                [ /(\d+) total packets dropped due to no ARP entry$/, [ [ :packets, 'dropped: no entry' ] ] ]
            ]),

            Graph.new('entries', protocol, [
                [ /(\d+) ARP entrys timed out$/, [ [ :entries, 'timed out' ] ] ],
                [ /(\d+) Duplicate IPs seen$/, [ [ :entries, 'duplicate IPs seen' ] ] ]
            ])
        ]
    end
end

proto_name = File.basename($0, '.*').escape
proto_name.slice! 0, PLUGIN_NAME.size if proto_name.start_with? PLUGIN_NAME

proto_name = 'tcp' if proto_name.empty?

def netstat_s_protocols
    `netstat -s`.select { |line|
        line =~ /^\w+:$/
    }.map! { |proto| proto.strip[0..-2] }
end

def netstat_s(protocol)
    `netstat -sp #{protocol}`.select { |line| not line.start_with? protocol }
end

case ARGV.first
when 'autoconf'
    puts `uname -s`.include?('FreeBSD') ? 'yes' : 'no'
when 'suggest'
    puts netstat_s_protocols.map(&:escape).join $/
when 'config'
    graphs_for(proto_name).each { |graph|
        puts graph.config.join $/
    }
else
    data = netstat_s(proto_name)
    graphs_for(proto_name).each { |graph|
        puts graph.fetch(data).join $/
    }

    warn "not parsed:\n#{data.join}" unless data.empty? if $debug_mode
end
[three new example graph images added: 33 KiB, 41 KiB, 19 KiB]
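As a rough sketch for reviewers, this is what one parse_expr entry of the new plugin turns into at fetch time; the counter value and the input line are made up, the entry and the multigraph naming come from the plugin above.

```ruby
# Sketch only: one tcp 'sent' entry applied to a made-up `netstat -sp tcp` line.
line  = "\t12345 packets sent"
entry = [ /(\d+) packets sent$/, [ [ :packets, 'total' ] ] ]
if line =~ entry.first
    unit, label = entry.last.first
    puts "multigraph netstat_bsd_s_tcp_sent_#{unit}"   # what Graph#name(unit) builds
    puts "#{label}.value #{$1}"                        # => "total.value 12345"
end
```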