
nginx_upstream_multi_: fix flake8 issues

Lars Kruse 2018-12-20 15:06:52 +01:00
parent 13bd1599b0
commit 5f9e882bce


@@ -1,29 +1,36 @@
 #!/usr/bin/env python3
 #
-# Munin plugin to monitor requests number, cache statuses, http status codes and average request times of
-# specified nginx upstreams.
+# Munin plugin to monitor requests number, cache statuses, http status codes and average request
+# times of specified nginx upstreams.
 #
 # Copyright Igor Borodikhin
 #
 # License : GPLv3
 #
 # Configuration parameters:
-# env.graphs - which graphs to produce (optional, list of graphs separated by spaces, default - cache http time request)
+# env.graphs - which graphs to produce (optional, list of graphs separated by spaces, default -
+#              cache http time request)
 # env.log - log file path (mandatory, ex.: /var/log/nginx/upstream.log)
-# env.upstream - list of upstreams to monitor (mandatory, including port numbers separated by space, ex.: 10.0.0.1:80 10.0.0.2:8080)
-# env.statuses - list of http status codes to monitor (optional, default - all statuses, ex.: 200 403 404 410 500 502)
-# env.percentiles - which percentiles to draw on time graphs (optional, list of percentiles separated by spaces, default - 80)
+# env.upstream - list of upstreams to monitor (mandatory, including port numbers separated by
+#                space, e.g.: 10.0.0.1:80 10.0.0.2:8080)
+# env.statuses - list of http status codes to monitor (optional, default - all statuses,
+#                e.g.: 200 403 404 410 500 502)
+# env.percentiles - which percentiles to draw on time graphs (optional, list of percentiles
+#                   separated by spaces, default - 80)
 #
 # ## Installation
-# Copy file to directory /usr/share/munin/pligins/ and create symbolic link(s) for each log file you wish to monitor.
+# Copy file to directory /usr/share/munin/pligins/ and create symbolic link(s) for each log file
+# you wish to monitor.
 #
 # Specify log_format at /etc/nginx/conf.d/upstream.conf:
-# log_format upstream "ua=[$upstream_addr] ut=[$upstream_response_time] us=[$upstream_status] cs=[$upstream_cache_status]"
+# log_format upstream "ua=[$upstream_addr] ut=[$upstream_response_time] us=[$upstream_status] \
+#                      cs=[$upstream_cache_status]"
 #
 # Use it in your site configuration (/etc/nginx/sites-enabled/anything.conf):
 # access_log /var/log/nginx/upstream.log upstream;
 #
-# Attention! Because munin-node does not have read permission for nginx log files we need to run it as root.
+# Attention! Since the default user (nobody) does not have read permission for nginx log files we
+# need to run it as root.
 #
 # And specify some options in /etc/munin/plugin-conf.d/munin-node:
 #
@@ -35,7 +42,7 @@
 # env.statuses 200 403 404 410 500 502
 # env.percentiles 50 80
 #
-#%# family=contrib
+# #%# family=contrib

 import copy
 import math
@@ -66,17 +73,21 @@ else:
     logPath = "/var/log/nginx/access.log"

 # Http statuses list
-httpStatusString = ("100:Continue;101:Switching protocols;102:Processing;200:OK;201:Created;202:Accepted;"
-                    "203:Non-Authoritative Information;204:No content;205:Reset content;206:Partial content;207:Multi-status;"
-                    "226:IM used;300:Multiple choices;301:Moved permanently;302:Moved temporarily;303:See other;304:Not modified;"
-                    "305:Use proxy;307:Temporary redirect;400:Bad request;401:Unauthorized;402:Payment required;403:Forbidden;"
-                    "404:Not found;405:Method not allowed;406:Not acceptable;407:Proxy Authentication Required;408:Request timeout;"
-                    "409:Conflict;410:Gone;411:Length required;412:Precondition failed;413:Request entity too large;"
-                    "414:Request URI too large;415:Usupported media type;416:Request range not satisfiable;417:Expectation failed;"
-                    "422:Unprocessable entity;423:Locked;424:Failed dependency;425:Unordered collection;426:Upgrade required;"
-                    "449:Retry with;456:Unrecoverable error;500:Internal server error;501:Not implemented;502:Bad gateway;"
-                    "503:Service unavailable;504:Gateway timeout;505:HTTP version not supported;506:Variant also negotiates;"
-                    "507:Insufficient storage;508:Loop detected;509:Bandwidth limit exceeded;510:Not extended")
+httpStatusString = (
+    "100:Continue;101:Switching protocols;102:Processing;200:OK;201:Created;202:Accepted;"
+    "203:Non-Authoritative Information;204:No content;205:Reset content;206:Partial content;"
+    "207:Multi-status;226:IM used;300:Multiple choices;301:Moved permanently;"
+    "302:Moved temporarily;303:See other;304:Not modified;305:Use proxy;307:Temporary redirect;"
+    "400:Bad request;401:Unauthorized;402:Payment required;403:Forbidden;404:Not found;"
+    "405:Method not allowed;406:Not acceptable;407:Proxy Authentication Required;"
+    "408:Request timeout;409:Conflict;410:Gone;411:Length required;412:Precondition failed;"
+    "413:Request entity too large;414:Request URI too large;415:Usupported media type;"
+    "416:Request range not satisfiable;417:Expectation failed;422:Unprocessable entity;"
+    "423:Locked;424:Failed dependency;425:Unordered collection;426:Upgrade required;"
+    "449:Retry with;456:Unrecoverable error;500:Internal server error;501:Not implemented;"
+    "502:Bad gateway;503:Service unavailable;504:Gateway timeout;505:HTTP version not supported;"
+    "506:Variant also negotiates;507:Insufficient storage;508:Loop detected;"
+    "509:Bandwidth limit exceeded;510:Not extended")

 if "statuses" in os.environ:
     statuses = os.environ["statuses"].split()
@@ -88,11 +99,11 @@ for statusString in httpStatusString.split(";"):
     [code, title] = statusString.split(":")
     if len(statuses) > 0 and code in statuses or len(statuses) == 0:
         httpStatusList[code] = {
-            "title" : title,
-            "requests" : 0
+            "title": title,
+            "requests": 0
         }

-cacheStatusList = { "MISS" : 0, "BYPASS" : 0, "EXPIRED" : 0, "UPDATING" : 0, "STALE" : 0, "HIT" : 0 }
+cacheStatusList = {"MISS": 0, "BYPASS": 0, "EXPIRED": 0, "UPDATING": 0, "STALE": 0, "HIT": 0}

 # Parse upstreams
 upstreams = {}
@@ -101,11 +112,11 @@ if "upstream" in os.environ:
     upstreamList = upstreamString.split()
     for upstream in upstreamList:
         upstreams[upstream] = {
-            "requests" : 0,
-            "time" : 0,
-            "times" : [],
-            "cache" : copy.deepcopy(cacheStatusList),
-            "http" : copy.deepcopy(httpStatusList)
+            "requests": 0,
+            "time": 0,
+            "times": [],
+            "cache": copy.deepcopy(cacheStatusList),
+            "http": copy.deepcopy(httpStatusList)
         }
 else:
     raise Exception("No upstreams specified")
@@ -132,6 +143,7 @@ except OSError:
 def sanitize(string):
     return string.replace(".", "_").replace(":", "_").replace("/", "_").replace("-", "_")

+
 if len(sys.argv) == 2 and sys.argv[1] == "config":
     # Parent graph declaration
     print("multigraph nginx_upstream_multi_%s" % siteName.replace(".", "_"))
@@ -145,7 +157,8 @@ if len(sys.argv) == 2 and sys.argv[1] == "config":
     if "request" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_requests"
+                  % (sanitize(siteName), sanitize(upstream)))
             print("graph_title Requests number - %s" % upstream)
             print("graph_vlabel rps")
             print("graph_category webserver")
@@ -156,32 +169,37 @@ if len(sys.argv) == 2 and sys.argv[1] == "config":
     if "time" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_times"
+                  % (sanitize(siteName), sanitize(upstream)))
             print("graph_title Request time - %s" % upstream)
             print("graph_vlabel sec.")
             print("graph_category webserver")
             print("us%s_times.label average" % (sanitize(upstream)))
             for percentile in percentiles:
-                print("us%s_times_percentile_%s.label %s-percentile" % (sanitize(upstream), percentile, percentile))
+                print("us%s_times_percentile_%s.label %s-percentile"
+                      % (sanitize(upstream), percentile, percentile))
             print()

     # HTTP Status codes graph declaration
     if "http" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_statuses"
+                  % (sanitize(siteName), sanitize(upstream)))
             print("graph_title HTTP - %s" % upstream)
             print("graph_vlabel rps")
             print("graph_category webserver")
             for status in sorted(httpStatusList.keys()):
-                print("http%s_%s_status.label %s - %s" % (status, sanitize(upstream), status, httpStatusList[status]["title"]))
+                print("http%s_%s_status.label %s - %s"
+                      % (status, sanitize(upstream), status, httpStatusList[status]["title"]))
             print()

     # Cache status graph declaration
     if "cache" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_cache"
+                  % (sanitize(siteName), sanitize(upstream)))
             print("graph_title Cache - %s" % upstream)
             print("graph_vlabel rps")
             print("graph_category webserver")
@@ -199,7 +217,7 @@ else:
     except Exception:
         lastByte = 0

-    if lastByteHandle != None:
+    if lastByteHandle is not None:
         lastByteHandle.close()

     try:
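The change from "!= None" to "is not None" above addresses flake8's E711: equality against None can be hijacked by a class's __eq__, while an identity check cannot. A tiny illustration (not taken from the plugin):

    class Weird:
        def __eq__(self, other):
            return True   # claims equality with everything, including None


    w = Weird()
    print(w == None)        # True, misleading; flake8 flags this comparison as E711
    print(w is not None)    # True, the identity test is unambiguous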
@@ -224,59 +242,59 @@ else:
         if (match):
             # Extract data
             address = match.group(1)
             time = match.group(2)
             status = match.group(3)
             cache = match.group(4)

             # Replace separators by space
             address = address.replace(",", " ")
             address = address.replace(" : ", " ")
-            address = re.sub("\s+", " ", address)
+            address = re.sub(r"\s+", " ", address)

             time = time.replace(",", " ")
             time = time.replace(" : ", " ")
-            time = re.sub("\s+", " ", time)
+            time = re.sub(r"\s+", " ", time)

             status = status.replace(",", " ")
             status = status.replace(" : ", " ")
-            status = re.sub("\s+", " ", status)
+            status = re.sub(r"\s+", " ", status)

             cache = cache.replace(",", " ")
             cache = cache.replace(" : ", " ")
-            cache = re.sub("\s+", " ", cache)
+            cache = re.sub(r"\s+", " ", cache)

             addresses = address.split()
             times = time.split()
             statuses = status.split()
             caches = cache.split()

             index = 0
             for uAddress in addresses:
                 if uAddress in upstreams.keys():
                     try:
                         uTime = float(times[index])
                     except ValueError:
                         uTime = 0

                     if index < len(statuses):
                         uStatus = statuses[index]
                     else:
                         uStatus = "-"

                     if index < len(caches):
                         uCache = caches[index]
                     else:
                         uCache = "-"

                     if uAddress != "-":
                         upstreams[uAddress]["requests"] += 1
                         if uTime != "-":
                             upstreams[uAddress]["time"] += uTime
                             upstreams[uAddress]["times"].append(uTime)
                         if uStatus != "-" and uStatus in upstreams[uAddress]["http"].keys():
                             upstreams[uAddress]["http"][uStatus]["requests"] += 1
                         if uCache != "-":
                             upstreams[uAddress]["cache"][uCache] += 1
                 index += 1

     try:
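The re.sub() changes above only swap plain string literals for raw strings, which silences flake8's W605 (invalid escape sequence "\s"); the compiled regex and the behaviour stay the same. A small sketch with a made-up $upstream_addr value:

    import re

    # Hypothetical $upstream_addr field: nginx separates hops with "," or " : "
    address = "10.0.0.1:80, 10.0.0.2:8080 : 10.0.0.3:8080"
    address = address.replace(",", " ").replace(" : ", " ")
    address = re.sub(r"\s+", " ", address)   # raw string keeps \s as a regex class
    print(address.split())   # ['10.0.0.1:80', '10.0.0.2:8080', '10.0.0.3:8080']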
@@ -301,12 +319,11 @@ else:
     if "request" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_requests"
+                  % (sanitize(siteName), sanitize(upstream)))
             value = 0
             if timeElapsed > 0:
                 value = upstreams[upstream]["requests"] / timeElapsed
             print("us%s_requests.value %s" % (sanitize(upstream), value))
             print()
@@ -318,27 +335,31 @@ else:
                 uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"]
             upstreams[upstream]["times"].sort()
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_times"
+                  % (sanitize(siteName), sanitize(upstream)))
             print("us%s_times.value %s" % (sanitize(upstream), uTime))
             for percentile in percentiles:
                 percentileValue = 0
                 if upstreams[upstream]["requests"] > 0:
                     uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"]
                     percentileKey = int(percentile) * len(upstreams[upstream]["times"]) / 100
-                    if len(upstreams[upstream]["times"])%2 > 0:
+                    if len(upstreams[upstream]["times"]) % 2 > 0:
                         low = int(math.floor(percentileKey))
                         high = int(math.ceil(percentileKey))
-                        percentileValue = (upstreams[upstream]["times"][low] + upstreams[upstream]["times"][high]) / 2
+                        percentileValue = (upstreams[upstream]["times"][low]
+                                           + upstreams[upstream]["times"][high]) / 2
                     else:
                         percentileValue = upstreams[upstream]["times"][int(percentileKey)]
-                print("us%s_times_percentile_%s.value %s" % (sanitize(upstream), percentile, percentileValue))
+                print("us%s_times_percentile_%s.value %s"
+                      % (sanitize(upstream), percentile, percentileValue))
             print()

     # HTTP Status codes graph data
     if "http" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_statuses"
+                  % (sanitize(siteName), sanitize(upstream)))
             for status in sorted(httpStatusList.keys()):
                 value = 0
                 if timeElapsed > 0:
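The reflowed percentile block above is a nearest-rank style lookup into the sorted list of response times, averaging the entries at the floor and ceiling of the computed index when the number of samples is odd. A standalone sketch of the same arithmetic (function and variable names are illustrative only):

    import math


    def percentile_value(times, percentile):
        # "times" is the sorted list of response times collected for one upstream
        key = int(percentile) * len(times) / 100
        if len(times) % 2 > 0:
            low = int(math.floor(key))
            high = int(math.ceil(key))
            return (times[low] + times[high]) / 2
        return times[int(key)]


    print(percentile_value(sorted([0.12, 0.30, 0.05, 0.44, 0.09]), 80))   # 0.44 for these sample values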
@@ -351,7 +372,8 @@ else:
     if "cache" in graphs_enabled:
         for upstream in upstreams.keys():
             print()
-            print("multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)))
+            print("multigraph nginx_upstream_multi_%s.%s_cache"
+                  % (sanitize(siteName), sanitize(upstream)))
             for status in cacheStatusList:
                 value = 0
                 if timeElapsed > 0: