root / plugins / nginx / nginx_upstream_multi_ @ 9f85e0ae
Historique | Voir | Annoter | Télécharger (13,5 ko)
| 1 | 6c765698 | majesty | #!/usr/bin/env python |
|---|---|---|---|
| 2 | # -*- coding: utf-8 -*- |
||
| 3 | # vim: set fileencoding=utf-8 |
||
| 4 | # |
||
| 5 | # Munin plugin to monitor requests number, cache statuses, http status codes and average request times of |
||
| 6 | # specified nginx upstreams. |
||
| 7 | # |
||
| 8 | # Copyright Igor Borodikhin |
||
| 9 | # |
||
| 10 | # License : GPLv3 |
||
| 11 | # |
||
| 12 | # Configuration parameters: |
||
| 13 | # env.graphs - which graphs to produce (optional, list of graphs separated by spaces, default - cache http time request) |
||
| 14 | # env.log - log file path (mandatory, ex.: /var/log/nginx/upstream.log) |
||
| 15 | # env.upstream - list of upstreams to monitor (mandatory, including port numbers separated by space, ex.: 10.0.0.1:80 10.0.0.2:8080) |
||
| 16 | # env.statuses - list of http status codes to monitor (optional, default - all statuses, ex.: 200 403 404 410 500 502) |
||
| 17 | # env.percentiles - which percentiles to draw on time graphs (optional, list of percentiles separated by spaces, default - 80) |
||
| 18 | # |
||
| 19 | # ## Installation |
||
| 20 | # Copy file to directory /usr/share/munin/plugins/ and create symbolic link(s) for each log file you wish to monitor. |
||
| 21 | # |
||
| 22 | # Specify log_format at /etc/nginx/conf.d/upstream.conf: |
||
| 23 | # log_format upstream "ua=[$upstream_addr] ut=[$upstream_response_time] us=[$upstream_status] cs=[$upstream_cache_status]" |
||
| 24 | # |
||
| 25 | # Use it in your site configuration (/etc/nginx/sites-enabled/anything.conf): |
||
| 26 | # access_log /var/log/nginx/upstream.log upstream; |
||
| 27 | # |
||
| 28 | # And specify some options in munin-node.conf: |
||
| 29 | # |
||
| 30 | # [nginx_upstream_multi_upstream] |
||
| 31 | # env.graphs cache http time request |
||
| 32 | # env.log /var/log/nginx/upstream.log |
||
| 33 | # env.upstream 10.0.0.1:80 10.0.0.2:8080 unix:/tmp/upstream3 |
||
| 34 | # env.statuses 200 403 404 410 500 502 |
||
| 35 | # env.percentiles 50 80 |
||
| 36 | # |
||
| 37 | #%# family=contrib |
||
| 38 | |||
| 39 | import os, sys, re, copy, math |
||
| 40 | from time import time |
||
| 41 | |||
# How we've been called: the symlink name encodes which site to monitor.
progName = os.path.basename(sys.argv[0])

# Where to store plugin state (None when munin did not export MUNIN_PLUGSTATE).
stateDir = os.environ.get("MUNIN_PLUGSTATE")

# Which site configuration we should use: everything after the plugin prefix.
siteName = progName[len("nginx_upstream_multi_"):]

# Log path (env.log from munin-node.conf, with a sensible nginx default).
logPath = os.environ.get("log", "/var/log/nginx/access.log")
||
| 60 | |||
# Http statuses list: "code:title" pairs used for graph field labels.
# (Label typo fixed: "Usupported media type" -> "Unsupported media type".)
httpStatusString = ("100:Continue;101:Switching protocols;102:Processing;200:OK;201:Created;202:Accepted;"
    "203:Non-Authoritative Information;204:No content;205:Reset content;206:Partial content;207:Multi-status;"
    "226:IM used;300:Multiple choices;301:Moved permanently;302:Moved temporarily;303:See other;304:Not modified;"
    "305:Use proxy;307:Temporary redirect;400:Bad request;401:Unauthorized;402:Payment required;403:Forbidden;"
    "404:Not found;405:Method not allowed;406:Not acceptable;407:Proxy Authentication Required;408:Request timeout;"
    "409:Conflict;410:Gone;411:Length required;412:Precondition failed;413:Request entity too large;"
    "414:Request URI too large;415:Unsupported media type;416:Request range not satisfiable;417:Expectation failed;"
    "422:Unprocessable entity;423:Locked;424:Failed dependency;425:Unordered collection;426:Upgrade required;"
    "449:Retry with;456:Unrecoverable error;500:Internal server error;501:Not implemented;502:Bad gateway;"
    "503:Service unavailable;504:Gateway timeout;505:HTTP version not supported;506:Variant also negotiates;"
    "507:Insufficient storage;508:Loop detected;509:Bandwidth limit exceeded;510:Not extended")

# env.statuses optionally restricts which http codes are graphed (empty = all).
statuses = os.environ.get("statuses", "").split()

# Per-code request counter table for every selected status code.
httpStatusList = {}
for statusString in httpStatusString.split(";"):
    code, title = statusString.split(":")
    # Equivalent to the original "len(statuses) > 0 and code in statuses
    # or len(statuses) == 0", without relying on and/or precedence.
    if not statuses or code in statuses:
        httpStatusList[code] = {
            "title": title,
            "requests": 0
        }

# Per-upstream nginx cache status counters.
cacheStatusList = {"MISS": 0, "BYPASS": 0, "EXPIRED": 0, "UPDATING": 0, "STALE": 0, "HIT": 0}
|
||
| 89 | |||
# Parse upstreams (env.upstream is mandatory); each upstream gets its own
# set of counters, with independent deep copies of the status tables.
upstreams = {}
if "upstream" in os.environ:
    for upstream in os.environ["upstream"].split():
        upstreams[upstream] = {
            "requests": 0,
            "time": 0,
            "times": [],
            "cache": copy.deepcopy(cacheStatusList),
            "http": copy.deepcopy(httpStatusList)
        }
else:
    raise Exception("No upstreams specified")

# Percentiles to draw on the time graphs; kept as strings so the default
# matches the type produced by splitting the env variable.
percentiles = os.environ.get("percentiles", "80").split()

# Which graphs to produce.
graphs_enabled = os.environ.get("graphs", "cache http time request").split()

now = int(time())

# State file holding the last log offset read; its mtime doubles as the
# timestamp of the previous run.
# NOTE(review): when MUNIN_PLUGSTATE is unset, stateDir is None and this
# yields a literal "None/..." path -- preserved from the original behavior.
lastBytePath = "%s/nginx_upstream_multi_%s_lastByte.txt" % (stateDir, siteName)
try:
    lastRun = os.path.getmtime(lastBytePath)
except OSError:
    # First run (or state file removed): treat "now" as the previous run.
    lastRun = now
||
| 123 | |||
| 124 | |||
| 125 | def sanitize(string): |
||
| 126 | return string.replace(".", "_").replace(":", "_").replace("/", "_").replace("-", "_")
|
||
| 127 | |||
if len(sys.argv) == 2 and sys.argv[1] == "config":
    # Parent graph declaration (total rps across all monitored upstreams).
    # Single-argument print() calls work identically on Python 2 and 3.
    print("multigraph nginx_upstream_multi_%s" % siteName.replace(".", "_"))
    print("graph_title Requests number")
    print("graph_vlabel rps")
    print("graph_category nginx")
    for upstream in upstreams.keys():
        print("us%s_requests.label %s" % (sanitize(upstream), upstream))

    # Requests graph declaration (per-upstream subgraph)
    if "request" in graphs_enabled:
        for upstream in upstreams.keys():
            print("")
            print("multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)))
            print("graph_title Requests number - %s" % upstream)
            print("graph_vlabel rps")
            print("graph_category nginx")
            print("us%s_requests.label %s" % (sanitize(upstream), upstream))
            print("")

    # Times graph declaration (average plus one field per percentile)
    if "time" in graphs_enabled:
        for upstream in upstreams.keys():
            print("")
            print("multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)))
            print("graph_title Request time - %s" % upstream)
            print("graph_vlabel sec.")
            print("graph_category nginx")
            print("us%s_times.label average" % (sanitize(upstream)))
            for percentile in percentiles:
                print("us%s_times_percentile_%s.label %s-percentile" % (sanitize(upstream), percentile, percentile))
            print("")

    # HTTP Status codes graph declaration
    if "http" in graphs_enabled:
        for upstream in upstreams.keys():
            print("")
            print("multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)))
            print("graph_title HTTP - %s" % upstream)
            print("graph_vlabel rps")
            print("graph_category nginx")
            # sorted() works on both Python 2 and 3; dict.keys().sort()
            # raises AttributeError on Python 3 (keys() is a view there).
            for status in sorted(httpStatusList.keys()):
                print("http%s_%s_status.label %s - %s" % (status, sanitize(upstream), status, httpStatusList[status]["title"]))
            print("")

    # Cache status graph declaration
    if "cache" in graphs_enabled:
        for upstream in upstreams.keys():
            print("")
            print("multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)))
            print("graph_title Cache - %s" % upstream)
            print("graph_vlabel rps")
            print("graph_category nginx")
            for status in cacheStatusList:
                print("us%s_%s_cache.label %s" % (sanitize(status), sanitize(upstream), status))
            print("")
||
| 186 | else: |
||
| 187 | timeElapsed = now - lastRun |
||
| 188 | |||
| 189 | lastByteHandle = None |
||
| 190 | |||
| 191 | try: |
||
| 192 | lastByteHandle = open(lastBytePath, "r") |
||
| 193 | lastByte = int(lastByteHandle.read()) |
||
| 194 | except Exception: |
||
| 195 | lastByte = 0 |
||
| 196 | |||
| 197 | if lastByteHandle != None: |
||
| 198 | lastByteHandle.close() |
||
| 199 | |||
| 200 | try: |
||
| 201 | logHandle = open(logPath, "r") |
||
| 202 | except Exception: |
||
| 203 | print "Log file %s not readable" % logPath |
||
| 204 | sys.exit(1) |
||
| 205 | |||
| 206 | try: |
||
| 207 | logSize = int(os.path.getsize(logPath)) |
||
| 208 | except ValueError: |
||
| 209 | logSize = 0 |
||
| 210 | |||
| 211 | if logSize < lastByte: |
||
| 212 | lastByte = 0 |
||
| 213 | |||
| 214 | regExp = re.compile(r"ua=\[(.*?)\]\s+ut=\[(.*?)\]\s+us=\[(.*?)\]\s+cs=\[(.*?)\]") |
||
| 215 | |||
| 216 | logHandle.seek(lastByte) |
||
| 217 | for line in logHandle: |
||
| 218 | match = regExp.search(line) |
||
| 219 | if (match): |
||
| 220 | # Extract data |
||
| 221 | address = match.group(1) |
||
| 222 | time = match.group(2) |
||
| 223 | status = match.group(3) |
||
| 224 | cache = match.group(4) |
||
| 225 | |||
| 226 | # Replace separators by space |
||
| 227 | address = address.replace(",", " ")
|
||
| 228 | address = address.replace(" : ", " ")
|
||
| 229 | address = re.sub("\s+", " ", address)
|
||
| 230 | |||
| 231 | time = time.replace(",", " ")
|
||
| 232 | time = time.replace(" : ", " ")
|
||
| 233 | time = re.sub("\s+", " ", time)
|
||
| 234 | |||
| 235 | status = status.replace(",", " ")
|
||
| 236 | status = status.replace(" : ", " ")
|
||
| 237 | status = re.sub("\s+", " ", status)
|
||
| 238 | |||
| 239 | cache = cache.replace(",", " ")
|
||
| 240 | cache = cache.replace(" : ", " ")
|
||
| 241 | cache = re.sub("\s+", " ", cache)
|
||
| 242 | |||
| 243 | addresses = address.split() |
||
| 244 | times = time.split() |
||
| 245 | statuses = status.split() |
||
| 246 | caches = cache.split() |
||
| 247 | |||
| 248 | index = 0 |
||
| 249 | for uAddress in addresses: |
||
| 250 | if uAddress in upstreams.keys(): |
||
| 251 | try: |
||
| 252 | uTime = float(times[index]) |
||
| 253 | except ValueError: |
||
| 254 | uTime = 0 |
||
| 255 | |||
| 256 | if index < len(statuses): |
||
| 257 | uStatus = statuses[index] |
||
| 258 | else: |
||
| 259 | uStatus = "-" |
||
| 260 | |||
| 261 | if index < len(caches): |
||
| 262 | uCache = caches[index] |
||
| 263 | else: |
||
| 264 | uCache = "-" |
||
| 265 | |||
| 266 | if uAddress != "-": |
||
| 267 | upstreams[uAddress]["requests"] += 1 |
||
| 268 | if uTime != "-": |
||
| 269 | upstreams[uAddress]["time"] += uTime |
||
| 270 | upstreams[uAddress]["times"].append(uTime) |
||
| 271 | if uStatus != "-" and uStatus in upstreams[uAddress]["http"].keys(): |
||
| 272 | upstreams[uAddress]["http"][uStatus]["requests"] += 1 |
||
| 273 | if uCache != "-": |
||
| 274 | upstreams[uAddress]["cache"][uCache] += 1 |
||
| 275 | index += 1 |
||
| 276 | |||
| 277 | try: |
||
| 278 | lastByteHandle = open(lastBytePath, "w") |
||
| 279 | lastByteHandle.write(str(logHandle.tell())) |
||
| 280 | lastByteHandle.close() |
||
| 281 | except Exception: |
||
| 282 | sys.exit(1) |
||
| 283 | |||
| 284 | logHandle.close() |
||
| 285 | |||
| 286 | # Parent graph data |
||
| 287 | for upstream in upstreams.keys(): |
||
| 288 | value = 0 |
||
| 289 | if timeElapsed > 0: |
||
| 290 | value = upstreams[upstream]["requests"] / timeElapsed |
||
| 291 | |||
| 292 | print "us%s_requests.value %s" % (sanitize(upstream), value) |
||
| 293 | |||
| 294 | # Requests graph data |
||
| 295 | if "request" in graphs_enabled: |
||
| 296 | for upstream in upstreams.keys(): |
||
| 297 | print "" |
||
| 298 | print "multigraph nginx_upstream_multi_%s.%s_requests" % (sanitize(siteName), sanitize(upstream)) |
||
| 299 | |||
| 300 | value = 0 |
||
| 301 | if timeElapsed > 0: |
||
| 302 | value = upstreams[upstream]["requests"] / timeElapsed |
||
| 303 | |||
| 304 | print "us%s_requests.value %s" % (sanitize(upstream), value) |
||
| 305 | print "" |
||
| 306 | |||
| 307 | # Times graph data |
||
| 308 | if "time" in graphs_enabled: |
||
| 309 | for upstream in upstreams.keys(): |
||
| 310 | uTime = 0 |
||
| 311 | if upstreams[upstream]["requests"] > 0: |
||
| 312 | uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"] |
||
| 313 | upstreams[upstream]["times"].sort() |
||
| 314 | print "" |
||
| 315 | print "multigraph nginx_upstream_multi_%s.%s_times" % (sanitize(siteName), sanitize(upstream)) |
||
| 316 | print "us%s_times.value %s" % (sanitize(upstream), uTime) |
||
| 317 | for percentile in percentiles: |
||
| 318 | percentileValue = 0 |
||
| 319 | if upstreams[upstream]["requests"] > 0: |
||
| 320 | uTime = upstreams[upstream]["time"] / upstreams[upstream]["requests"] |
||
| 321 | percentileKey = int(percentile) * len(upstreams[upstream]["times"]) / 100 |
||
| 322 | if len(upstreams[upstream]["times"])%2 > 0: |
||
| 323 | low = int(math.floor(percentileKey)) |
||
| 324 | high = int(math.ceil(percentileKey)) |
||
| 325 | percentileValue = (upstreams[upstream]["times"][low] + upstreams[upstream]["times"][high]) / 2 |
||
| 326 | else: |
||
| 327 | percentileValue = upstreams[upstream]["times"][int(percentileKey)] |
||
| 328 | print "us%s_times_percentile_%s.value %s" % (sanitize(upstream), percentile, percentileValue) |
||
| 329 | print "" |
||
| 330 | |||
| 331 | # HTTP Status codes graph data |
||
| 332 | if "http" in graphs_enabled: |
||
| 333 | for upstream in upstreams.keys(): |
||
| 334 | print "" |
||
| 335 | print "multigraph nginx_upstream_multi_%s.%s_statuses" % (sanitize(siteName), sanitize(upstream)) |
||
| 336 | keylist = httpStatusList.keys() |
||
| 337 | keylist.sort() |
||
| 338 | for status in keylist: |
||
| 339 | value = 0 |
||
| 340 | if timeElapsed > 0: |
||
| 341 | value = upstreams[upstream]["http"][status]["requests"] / timeElapsed |
||
| 342 | |||
| 343 | print "http%s_%s_status.value %s" % (status, sanitize(upstream), value) |
||
| 344 | print "" |
||
| 345 | |||
| 346 | # Cache status graph data |
||
| 347 | if "cache" in graphs_enabled: |
||
| 348 | for upstream in upstreams.keys(): |
||
| 349 | print "" |
||
| 350 | print "multigraph nginx_upstream_multi_%s.%s_cache" % (sanitize(siteName), sanitize(upstream)) |
||
| 351 | for status in cacheStatusList: |
||
| 352 | value = 0 |
||
| 353 | if timeElapsed > 0: |
||
| 354 | value = upstreams[upstream]["cache"][status] / timeElapsed |
||
| 355 | |||
| 356 | print "us%s_%s_cache.value %s" % (sanitize(status), sanitize(upstream), value) |
||
| 357 | print "" |
