Revision b0b39b01
Ruby plugins: apply style changes as suggested by "rubocop --fix-layout"
plugins/network/netstat_s_

 #!/usr/bin/env ruby

-# netstat_s revision 6 (Nov 2013)
-#
-# This plugin shows various statistics from 'netstat -s'
-#
-# Required privileges: none
-#
-# OS:
-# Supposed: BSD, Linux (only a few items, see netstat_multi for more)
-# Tested: FreeBSD: 8.2, 8.3, 9.1
-# Linux : Debian 6 (kernel 2.6.32), Arch (kernel 3.11.6), CentOS 6
-#
-# Author: Artem Sheremet <dot.doom@gmail.com>
-#
+=begin
+
+netstat_s revision 6 (Nov 2013)
+
+This plugin shows various statistics from 'netstat -s'
+
+Required privileges: none
+
+OS:
+Supposed: BSD, Linux (only a few items, see netstat_multi for more)
+Tested: FreeBSD: 8.2, 8.3, 9.1
+Linux : Debian 6 (kernel 2.6.32), Arch (kernel 3.11.6), CentOS 6
+
+Author: Artem Sheremet <dot.doom@gmail.com>
+

 #%# family=auto
 #%# capabilities=autoconf suggest

+=end
+
 # original filename
 PLUGIN_NAME = 'netstat_s_'

...
 $debug_mode = ARGV.first == 'debug'

 class String
-def escape
-self.gsub /[^\w]/, '_'
-end
-
-unless method_defined? :start_with?
-def start_with?(str)
-self[0...str.size] == str
-end
-end
-
-unless method_defined? :lines
-def lines
-self.split($/).to_enum
-end
-end
+def escape
+self.gsub /[^\w]/, '_'
+end
+
+unless method_defined? :start_with?
+def start_with?(str)
+self[0...str.size] == str
+end
+end
+
+unless method_defined? :lines
+def lines
+self.split($/).to_enum
+end
+end
 end

 class Graph
-def initialize(name, protocol, parse_expr)
-@name, @protocol, @parse_expr = name, protocol, parse_expr
-end
-
-def config
-config_options = []
-
-# first, build a list of multigraphs (one graph per unit)
-# Hash key is unit, and the value is array of labels
-multigraphs = {}
-@parse_expr.each { |expr, descr|
-next unless descr # no label - skip this entry
-descr.each { |entry|
-labels_array = (multigraphs[entry[0]] ||= [])
-labels_array.push [entry[1], entry[2]]
-}
-}
-
-multigraphs.each_pair { |unit, labels_and_negatives|
-# now just add options to the config
-
-config_options.concat [
-"multigraph #{name(unit)}",
-"graph_title Netstat: #{@protocol}: #{@name}#{" (#{unit})" if multigraphs.size > 1}",
-"graph_category network",
-"graph_order #{labels_and_negatives.map { |label, _negative| label.escape }.join(' ')}"
-]
-
-config_options.push "graph_args --base 1024" if unit == :bytes
-has_negatives = false
-
-labels_and_negatives.each { |label, negative|
-label_esc = label.escape
-has_negatives = true unless negative == nil
-
-if negative == true
-# the value has no opposite and is negative
-config_options.concat [
-"#{label_esc}.graph no",
-"#{label_esc}_neg.type DERIVE",
-"#{label_esc}_neg.min 0",
-"#{label_esc}_neg.draw LINE",
-"#{label_esc}_neg.label #{label}",
-"#{label_esc}_neg.negative #{label_esc}"
-]
-else
-config_options.concat [
-"#{label_esc}.type DERIVE",
-"#{label_esc}.min 0",
-"#{label_esc}.draw LINE",
-"#{label_esc}.label #{label}"
-]
-end
-
-if negative == false
-# the value has no opposite and is positive
-config_options.concat [
-"#{label_esc}_neg.graph off",
-"#{label_esc}.negative #{label_esc}_neg"
-]
-elsif negative
-negative_esc = negative.escape
-config_options.concat [
-"#{label_esc}.negative #{negative_esc}",
-"#{negative_esc}.graph no"
-]
-end
-}
-
-config_options.push "graph_vlabel per second#{" in (-) / out (+)" if has_negatives}"
-}
-
-config_options
-end
-
-def fetch(data)
-output_data = []
-
-# first build a set of multigraphs, one per unit.
-# Hash key is unit, and the value is a hash of 'escaped label' => 'value'
-multigraphs = {}
-@parse_expr.each { |expr, descr|
-next unless descr # no label - skip this entry
-index = data.index { |line| line =~ expr }
-if index
-data.delete_at index
-$~[1..-1].zip(descr).each { |value, info|
-unit, label = info
-(multigraphs[unit] ||= {})[label.escape] = value
-}
-else
-warn "no line found for #{expr}, #{descr}" if $debug_mode
-end
-}
-
-multigraphs.each_pair { |unit, values|
-output_data.push "multigraph #{name(unit)}"
-output_data += values.map { |label, value| "#{label}.value #{value}" }
-}
-
-output_data
-end
-
-def name(unit)
-"#{PLUGIN_NAME}#{@protocol}_#{@name.escape}_#{unit}"
-end
+def initialize(name, protocol, parse_expr)
+@name, @protocol, @parse_expr = name, protocol, parse_expr
+end
+
+def config
+config_options = []
+
+# first, build a list of multigraphs (one graph per unit)
+# Hash key is unit, and the value is array of labels
+multigraphs = {}
+@parse_expr.each { |expr, descr|
+next unless descr # no label - skip this entry
+
+descr.each { |entry|
+labels_array = (multigraphs[entry[0]] ||= [])
+labels_array.push [entry[1], entry[2]]
+}
+}
+
+multigraphs.each_pair { |unit, labels_and_negatives|
+# now just add options to the config
+
+config_options.concat [
+"multigraph #{name(unit)}",
+"graph_title Netstat: #{@protocol}: #{@name}#{" (#{unit})" if multigraphs.size > 1}",
+"graph_category network",
+"graph_order #{labels_and_negatives.map { |label, _negative| label.escape }.join(' ')}"
+]
+
+config_options.push "graph_args --base 1024" if unit == :bytes
+has_negatives = false
+
+labels_and_negatives.each { |label, negative|
+label_esc = label.escape
+has_negatives = true unless negative == nil
+
+if negative == true
+# the value has no opposite and is negative
+config_options.concat [
+"#{label_esc}.graph no",
+"#{label_esc}_neg.type DERIVE",
+"#{label_esc}_neg.min 0",
+"#{label_esc}_neg.draw LINE",
+"#{label_esc}_neg.label #{label}",
+"#{label_esc}_neg.negative #{label_esc}"
+]
+else
+config_options.concat [
+"#{label_esc}.type DERIVE",
+"#{label_esc}.min 0",
+"#{label_esc}.draw LINE",
+"#{label_esc}.label #{label}"
+]
+end
+
+if negative == false
+# the value has no opposite and is positive
+config_options.concat [
+"#{label_esc}_neg.graph off",
+"#{label_esc}.negative #{label_esc}_neg"
+]
+elsif negative
+negative_esc = negative.escape
+config_options.concat [
+"#{label_esc}.negative #{negative_esc}",
+"#{negative_esc}.graph no"
+]
+end
+}
+
+config_options.push "graph_vlabel per second#{" in (-) / out (+)" if has_negatives}"
+}
+
+config_options
+end
+
+def fetch(data)
+output_data = []
+
+# first build a set of multigraphs, one per unit.
+# Hash key is unit, and the value is a hash of 'escaped label' => 'value'
+multigraphs = {}
+@parse_expr.each { |expr, descr|
+next unless descr # no label - skip this entry
+
+index = data.index { |line| line =~ expr }
+if index
+data.delete_at index
+$~[1..-1].zip(descr).each { |value, info|
+unit, label = info
+(multigraphs[unit] ||= {})[label.escape] = value
+}
+else
+warn "no line found for #{expr}, #{descr}" if $debug_mode
+end
+}
+
+multigraphs.each_pair { |unit, values|
+output_data.push "multigraph #{name(unit)}"
+output_data += values.map { |label, value| "#{label}.value #{value}" }
+}
+
+output_data
+end
+
+def name(unit)
+"#{PLUGIN_NAME}#{@protocol}_#{@name.escape}_#{unit}"
+end
 end

 def graphs_for(protocol)
-case protocol
-# Order of the graps in each section is important for parsing.
-# At the same time, it is not important for munin, so we are OK placing it in parsing order here.
-when 'tcp'
-$os == :linux ? [
-Graph.new('sent', protocol, [
-# Description of the elements of arrays below:
-# 0: regexp to parse the line
-# 1: Array<Array[3]> for each matching group in the regular expression.
-# 0: unit name
-# 1: label
-# 2 (optional): negative label
-# It could be reasonable to add more elements as warning and critical values.
-
-[ /(\d+) segments send out$/, [ [ :segments, 'total' ] ] ],
-[ /(\d+) segments retransmitted$/, [ [ :segments, 'retransmitted' ] ] ]
-]),
-
-Graph.new('received', protocol, [
-[ /(\d+) segments received$/, [ [ :segments, 'total' ] ] ],
-[ /(\d+) bad segments received.$/, [ [ :segments, 'bad' ] ] ]
-]),
-
-Graph.new('connections', protocol, [
-[ /(\d+) active connections openings$/, [ [ :connections, 'active openings' ] ] ],
-[ /(\d+) passive connection openings$/, [ [ :connections, 'passive openings' ] ] ],
-[ /(\d+) failed connection attempts$/, [ [ :connections, 'failed attempts' ] ] ],
-[ /(\d+) connection resets received$/, [ [ :connections, 'RST received' ] ] ],
-[ /(\d+) connections established$/, [ [ :connections, 'established' ] ] ],
-[ /(\d+) resets sent$/, [ [ :connections, 'RST sent' ] ] ]
-]),
-
-Graph.new('timeouts', protocol, [
-[ /(\d+) timeouts after SACK recovery$/, [ [ :segments, 'after SACK recovery' ] ] ],
-[ /(\d+) other TCP timeouts$/, [ [ :segments, 'other TCP' ] ] ],
-[ /(\d+) timeouts in loss state$/, [ [ :segments, 'in a loss state' ] ] ]
-])
-] : [
-Graph.new('sent', protocol, [
-[ /(\d+) packets sent$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) data packets \((\d+) bytes\)$/, [ [ :packets, 'data' ], [ :bytes, 'data' ] ] ],
-[ /(\d+) data packets \((\d+) bytes\) retransmitted$/, [ [ :packets, 'retransmitted' ], [ :bytes, 'retransmitted' ] ] ],
-[ /(\d+) data packets unnecessarily retransmitted$/, [ [ :packets, 'unnecessarily retransmitted' ] ] ],
-[ /(\d+) resends initiated by MTU discovery$/, [ [ :packets, 'resends initiated by MTU discovery' ] ] ],
-[ /(\d+) ack-only packets \((\d+) delayed\)$/, [ [ :packets, 'ack-only' ], [ :packets, 'ack-only delayed' ] ] ],
-[ /(\d+) URG only packets$/, [ [ :packets, 'URG only' ] ] ],
-[ /(\d+) window probe packets$/, [ [ :packets, 'window probe' ] ] ],
-[ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ],
-[ /(\d+) control packets$/, [ [ :packets, 'control' ] ] ]
-]),
-
-Graph.new('received', protocol, [
-[ /(\d+) packets received$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) acks \(for (\d+) bytes\)$/, [ [ :packets, 'acks' ], [ :bytes, 'acks' ] ] ],
-[ /(\d+) duplicate acks$/, [ [ :packets, 'duplicate acks' ] ] ],
-[ /(\d+) acks for unsent data$/, [ [ :packets, 'acks for unsent data' ] ] ],
-[ /(\d+) packets \((\d+) bytes\) received in-sequence$/, [ [ :packets, 'in-sequence' ], [ :bytes, 'in-sequence' ] ] ],
-[ /(\d+) completely duplicate packets \((\d+) bytes\)$/, [ [ :packets, 'completely duplicate' ], [ :bytes, 'completely duplicate' ] ] ],
-[ /(\d+) old duplicate packets$/, [ [ :packets, 'old duplicate' ] ] ],
-[ /(\d+) packets with some dup\. data \((\d+) bytes duped\)$/, [ [ :packets, 'some dup. data' ], [ :bytes, 'partial dups' ] ] ],
-[ /(\d+) out-of-order packets \((\d+) bytes\)$/, [ [ :packets, 'out-of-order' ], [ :bytes, 'out-of-order' ] ] ],
-[ /(\d+) packets \((\d+) bytes\) of data after window$/, [ [ :packets, 'data after window' ], [ :bytes, 'data after window' ] ] ],
-[ /(\d+) window probes$/, [ [ :packets, 'window probes' ] ] ],
-[ /(\d+) window update packets$/, [ [ :packets, 'window update' ] ] ],
-[ /(\d+) packets received after close$/, [ [ :packets, 'after close' ] ] ],
-[ /(\d+) discarded for bad checksums$/, [ [ :packets, 'bad checksums' ] ] ],
-[ /(\d+) discarded for bad header offset fields?$/, [ [ :packets, 'bad header offset flds' ] ] ],
-[ /(\d+) discarded because packet too short$/, [ [ :packets, 'too short' ] ] ],
-[ /(\d+) discarded due to memory problems$/, [ [ :packets, 'discarded: memory problems' ] ] ],
-[ /(\d+) ignored RSTs in the windows$/, [ [ :packets, 'ignored RSTs in windows' ] ] ],
-[ /(\d+) segments updated rtt \(of (\d+) attempts\)$/, [ [ :packets, 'RTT: updated' ], [ :packets, 'RTT: attempts to update' ] ] ]
-]),
-
-Graph.new('connections', protocol, [
-[ /(\d+) connection requests$/, [ [ :connections, 'requests' ] ] ],
-[ /(\d+) connection accepts$/, [ [ :connections, 'accepts' ] ] ],
-[ /(\d+) bad connection attempts$/, [ [ :connections, 'bad attempts' ] ] ],
-[ /(\d+) listen queue overflows$/, [ [ :connections, 'listen queue overflows' ] ] ],
-[ /(\d+) connections established \(including accepts\)$/, [ [ :connections, 'established' ] ] ],
-[ /(\d+) connections closed \(including (\d+) drops\)$/, [ [ :connections, 'closed' ], [ :connections, 'dropped' ] ] ],
-[ /(\d+) connections updated cached RTT on close$/, [ [ :connections, 'closed & upd cached RTT' ] ] ],
-[ /(\d+) connections updated cached RTT variance on close$/, [ [ :connections, 'closed & upd cached RTT variance' ] ] ],
-[ /(\d+) connections updated cached ssthresh on close$/, [ [ :connections, 'closed & upd cached ssthresh' ] ] ],
-[ /(\d+) embryonic connections dropped$/, [ [ :connections, 'embryonic dropped' ] ] ]
-]),
-
-Graph.new('timeouts', protocol, [
-[ /(\d+) retransmit timeouts$/, [ [ :connections, 'retransmit' ] ] ],
-[ /(\d+) connections dropped by rexmit timeout$/, [ [ :connections, 'retransmit: dropped' ] ] ],
-[ /(\d+) persist timeouts$/, [ [ :connections, 'persist' ] ] ],
-[ /(\d+) connections dropped by persist timeout$/, [ [ :connections, 'persist: dropped' ] ] ],
-[ /(\d+) Connections \(fin_wait_2\) dropped because of timeout$/, [ [ :connections, 'fin_wait_2: dropped' ] ] ],
-[ /(\d+) keepalive timeouts$/, [ [ :connections, 'keepalive' ] ] ],
-[ /(\d+) keepalive probes sent$/, [ [ :connections, 'keepalive: probes sent' ] ] ],
-[ /(\d+) connections dropped by keepalive$/, [ [ :connections, 'keepalive: dropped' ] ] ]
-]),
-
-Graph.new('correct predictions', protocol, [
-[ /(\d+) correct ACK header predictions$/, [ [ :predictions, 'ACK header' ] ] ],
-[ /(\d+) correct data packet header predictions$/, [ [ :predictions, 'data packet header' ] ] ]
-]),
-
-Graph.new('SYN', protocol, [
-[ /(\d+) syncache entries added$/, [ [ :entries, 'cache added' ] ] ],
-[ /(\d+) cookies sent$/, [ [ :entries, 'cookies sent' ] ] ],
-[ /(\d+) cookies received$/, [ [ :entries, 'cookies received' ] ] ],
-[ /(\d+) retransmitted$/, [ [ :entries, 'retransmitted' ] ] ],
-[ /(\d+) dupsyn$/, [ [ :entries, 'duplicates' ] ] ],
-[ /(\d+) dropped$/, [ [ :entries, 'dropped' ] ] ],
-[ /(\d+) completed$/, [ [ :entries, 'completed' ] ] ],
-[ /(\d+) bucket overflow$/, [ [ :entries, 'bucket overflow' ] ] ],
-[ /(\d+) cache overflow$/, [ [ :entries, 'cache overflow' ] ] ],
-[ /(\d+) reset$/, [ [ :entries, 'reset' ] ] ],
-[ /(\d+) stale$/, [ [ :entries, 'stale' ] ] ],
-[ /(\d+) aborted$/, [ [ :entries, 'aborted' ] ] ],
-[ /(\d+) badack$/, [ [ :entries, 'bad ACK' ] ] ],
-[ /(\d+) unreach$/, [ [ :entries, 'unreachable' ] ] ],
-[ /(\d+) zone failures$/, [ [ :entries, 'zone failures' ] ] ],
-[ /(\d+) hostcache entries added$/, [ [ :entries, 'hostcache added' ] ] ],
-[ /(\d+) bucket overflow$/, [ [ :entries, 'hostcache overflow' ] ] ]
-]),
-
-Graph.new('SACK', protocol, [
-[ /(\d+) SACK recovery episodes$/, [ [ :packets, 'recovery episodes' ] ] ],
-[ /(\d+) segment rexmits in SACK recovery episodes$/, [ [ :packets, 'segment rexmits' ] ] ],
-[ /(\d+) byte rexmits in SACK recovery episodes$/, [ [ :bytes, 'bytes rexmitted' ] ] ],
-[ /(\d+) SACK options \(SACK blocks\) received$/, [ [ :packets, 'options blocks rcvd' ] ] ],
-[ /(\d+) SACK options \(SACK blocks\) sent$/, [ [ :packets, 'options blocks sent' ] ] ],
-[ /(\d+) SACK scoreboard overflow$/, [ [ :packets, 'scoreboard overflow' ] ] ]
-]),
-
-Graph.new('ECN', protocol, [
-[ /(\d+) packets with ECN CE bit set$/, [ [ :packets, 'CE bit' ] ] ],
-[ /(\d+) packets with ECN ECT\(0\) bit set$/, [ [ :packets, 'ECT(0) bit' ] ] ],
-[ /(\d+) packets with ECN ECT\(1\) bit set$/, [ [ :packets, 'ECT(1) bit' ] ] ],
-[ /(\d+) successful ECN handshakes$/, [ [ :packets, 'successful handshakes' ] ] ],
-[ /(\d+) times ECN reduced the congestion window$/, [ [ :packets, 'congestion window reduced' ] ] ]
-])
-]
-when 'udp'
-$os == :linux ? [
-] : [
-Graph.new('received', protocol, [
-[ /(\d+) datagrams received$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) with incomplete header$/, [ [ :packets, 'incomplete header' ] ] ],
-[ /(\d+) with bad data length field$/, [ [ :packets, 'bad data length field' ] ] ],
-[ /(\d+) with bad checksum$/, [ [ :packets, 'bad checksum' ] ] ],
-[ /(\d+) with no checksum$/, [ [ :packets, 'no checksum' ] ] ],
-[ /(\d+) dropped due to no socket$/, [ [ :packets, 'dropped: no socket' ] ] ],
-[ /(\d+) broadcast\/multicast datagrams undelivered$/, [ [ :packets, '*cast undelivered' ] ] ],
-[ /(\d+) dropped due to full socket buffers$/, [ [ :packets, 'dropped: no buffers' ] ] ],
-[ /(\d+) not for hashed pcb$/, [ [ :packets, 'not for hashed pcb' ] ] ],
-[ /(\d+) delivered$/, [ [ :packets, 'delivered' ] ] ]
-]),
-
-Graph.new('sent', protocol, [
-[ /(\d+) datagrams output$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) times multicast source filter matched$/, [ [ :packets, 'multicast src filter match' ] ] ]
-])
-]
-when 'ip'
-$os == :linux ? [
-] : [
-Graph.new('received', protocol, [
-[ /(\d+) total packets received$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) bad header checksums$/, [ [ :packets, 'bad header checksum' ] ] ],
-[ /(\d+) with size smaller than minimum$/, [ [ :packets, 'size smaller than min' ] ] ],
-[ /(\d+) with data size < data length$/, [ [ :packets, 'data size < data length' ] ] ],
-[ /(\d+) with ip length > max ip packet size$/, [ [ :packets, 'ip length > max ip packet sz' ] ] ],
-[ /(\d+) with header length < data size$/, [ [ :packets, 'header length < data size' ] ] ],
-[ /(\d+) with data length < header length$/, [ [ :packets, 'data length < header length' ] ] ],
-[ /(\d+) with bad options$/, [ [ :packets, 'bad options' ] ] ],
-[ /(\d+) with incorrect version number$/, [ [ :packets, 'incorrect version' ] ] ],
-[ /(\d+) fragments? received$/, [ [ :packets, 'fragments' ] ] ],
-[ /(\d+) fragments? dropped \(dup or out of space\)$/, [ [ :packets, 'frags dropped: dup/out of spc' ] ] ],
-[ /(\d+) fragments? dropped after timeout$/, [ [ :packets, 'frags dropped: timeout' ] ] ],
-[ /(\d+) packets? reassembled ok$/, [ [ :packets, 'reassembled ok' ] ] ],
-[ /(\d+) packets? for this host$/, [ [ :packets, 'for this host' ] ] ],
-[ /(\d+) packets? for unknown\/unsupported protocol$/, [ [ :packets, 'for unknown/unsup protocol' ] ] ],
-[ /(\d+) packets? forwarded \((\d+) packets fast forwarded\)$/, [ [ :packets, 'forwarded' ], [ :packets, 'fast forwarded' ] ] ],
-[ /(\d+) packets? not forwardable$/, [ [ :packets, 'not forwardable' ] ] ],
-[ /(\d+) packets? received for unknown multicast group$/, [ [ :packets, 'unknown multicast grp' ] ] ]
-]),
-
-Graph.new('sent', protocol, [
-[ /(\d+) packets? sent from this host$/, [ [ :packets, 'total' ] ] ],
-[ /(\d+) redirects? sent$/, [ [ :packets, 'redirect' ] ] ],
-[ /(\d+) packets? sent with fabricated ip header$/, [ [ :packets, 'fabricated IP head' ] ] ],
-[ /(\d+) output packets? dropped due to no bufs, etc\.$/, [ [ :packets, 'dropped: no bufs, etc' ] ] ],
-[ /(\d+) output packets? discarded due to no route$/, [ [ :packets, 'discarded: no route' ] ] ],
-[ /(\d+) output datagrams? fragmented$/, [ [ :packets, 'fragmented' ] ] ],
-[ /(\d+) fragments? created$/, [ [ :packets, 'fragments created' ] ] ],
-[ /(\d+) datagrams? that can't be fragmented$/, [ [ :packets, "can't be fragmented" ] ] ],
-[ /(\d+) tunneling packets? that can't find gif$/, [ [ :packets, 'tunneling, gif not found' ] ] ],
-[ /(\d+) datagrams? with bad address in header$/, [ [ :packets, 'bad address in header' ] ] ]
-])
-]
-when 'arp'
-$os == :linux ? [] : [
-Graph.new('packets', protocol, [
-# This is just a total, so ignore the value but keep regexp to avoid 'not parsed' warning.
-[ /(\d+) ARP packets? received$/ ],
-[ /(\d+) ARP requests? received$/, [ [ :packets, 'requests received' ] ] ],
-[ /(\d+) ARP repl(?:y|ies) received$/, [ [ :packets, 'replies received' ] ] ],
-[ /(\d+) ARP requests? sent$/, [ [ :packets, 'requests', 'requests received' ] ] ],
-[ /(\d+) ARP repl(?:y|ies) sent$/, [ [ :packets, 'replies', 'replies received' ] ] ],
-[ /(\d+) total packets? dropped due to no ARP entry$/, [ [ :packets, 'no entry' ] ] ]
-]),
-
-Graph.new('entries', protocol, [
-[ /(\d+) ARP entrys? timed out$/, [ [ :entries, 'timed out' ] ] ],
-[ /(\d+) Duplicate IPs seen$/, [ [ :entries, 'duplicate IPs seen' ] ] ]
-])
-]
-end
+case protocol
+# Order of the graps in each section is important for parsing.
+# At the same time, it is not important for munin, so we are OK placing it in parsing order here.
+when 'tcp'
+$os == :linux ? [
+Graph.new('sent', protocol, [
+# Description of the elements of arrays below:
+# 0: regexp to parse the line
+# 1: Array<Array[3]> for each matching group in the regular expression.
+# 0: unit name
+# 1: label
+# 2 (optional): negative label
+# It could be reasonable to add more elements as warning and critical values.
+
+[/(\d+) segments send out$/, [[:segments, 'total']]],
+[/(\d+) segments retransmitted$/, [[:segments, 'retransmitted']]]
+]),
+
+Graph.new('received', protocol, [
+[/(\d+) segments received$/, [[:segments, 'total']]],
+[/(\d+) bad segments received.$/, [[:segments, 'bad']]]
+]),
+
+Graph.new('connections', protocol, [
+[/(\d+) active connections openings$/, [[:connections, 'active openings']]],
+[/(\d+) passive connection openings$/, [[:connections, 'passive openings']]],
+[/(\d+) failed connection attempts$/, [[:connections, 'failed attempts']]],
+[/(\d+) connection resets received$/, [[:connections, 'RST received']]],
+[/(\d+) connections established$/, [[:connections, 'established']]],
+[/(\d+) resets sent$/, [[:connections, 'RST sent']]]
+]),
+
+Graph.new('timeouts', protocol, [
+[/(\d+) timeouts after SACK recovery$/, [[:segments, 'after SACK recovery']]],
+[/(\d+) other TCP timeouts$/, [[:segments, 'other TCP']]],
+[/(\d+) timeouts in loss state$/, [[:segments, 'in a loss state']]]
+])
+] : [
+Graph.new('sent', protocol, [
+[/(\d+) packets sent$/, [[:packets, 'total']]],
+[/(\d+) data packets \((\d+) bytes\)$/, [[:packets, 'data'], [:bytes, 'data']]],
+[/(\d+) data packets \((\d+) bytes\) retransmitted$/, [[:packets, 'retransmitted'], [:bytes, 'retransmitted']]],
+[/(\d+) data packets unnecessarily retransmitted$/, [[:packets, 'unnecessarily retransmitted']]],
+[/(\d+) resends initiated by MTU discovery$/, [[:packets, 'resends initiated by MTU discovery']]],
+[/(\d+) ack-only packets \((\d+) delayed\)$/, [[:packets, 'ack-only'], [:packets, 'ack-only delayed']]],
+[/(\d+) URG only packets$/, [[:packets, 'URG only']]],
+[/(\d+) window probe packets$/, [[:packets, 'window probe']]],
+[/(\d+) window update packets$/, [[:packets, 'window update']]],
+[/(\d+) control packets$/, [[:packets, 'control']]]
+]),
+
+Graph.new('received', protocol, [
+[/(\d+) packets received$/, [[:packets, 'total']]],
+[/(\d+) acks \(for (\d+) bytes\)$/, [[:packets, 'acks'], [:bytes, 'acks']]],
+[/(\d+) duplicate acks$/, [[:packets, 'duplicate acks']]],
+[/(\d+) acks for unsent data$/, [[:packets, 'acks for unsent data']]],
+[/(\d+) packets \((\d+) bytes\) received in-sequence$/, [[:packets, 'in-sequence'], [:bytes, 'in-sequence']]],
+[/(\d+) completely duplicate packets \((\d+) bytes\)$/, [[:packets, 'completely duplicate'], [:bytes, 'completely duplicate']]],
+[/(\d+) old duplicate packets$/, [[:packets, 'old duplicate']]],
+[/(\d+) packets with some dup\. data \((\d+) bytes duped\)$/, [[:packets, 'some dup. data'], [:bytes, 'partial dups']]],
+[/(\d+) out-of-order packets \((\d+) bytes\)$/, [[:packets, 'out-of-order'], [:bytes, 'out-of-order']]],
+[/(\d+) packets \((\d+) bytes\) of data after window$/, [[:packets, 'data after window'], [:bytes, 'data after window']]],
+[/(\d+) window probes$/, [[:packets, 'window probes']]],
+[/(\d+) window update packets$/, [[:packets, 'window update']]],
+[/(\d+) packets received after close$/, [[:packets, 'after close']]],
+[/(\d+) discarded for bad checksums$/, [[:packets, 'bad checksums']]],
+[/(\d+) discarded for bad header offset fields?$/, [[:packets, 'bad header offset flds']]],
+[/(\d+) discarded because packet too short$/, [[:packets, 'too short']]],
+[/(\d+) discarded due to memory problems$/, [[:packets, 'discarded: memory problems']]],
+[/(\d+) ignored RSTs in the windows$/, [[:packets, 'ignored RSTs in windows']]],
+[/(\d+) segments updated rtt \(of (\d+) attempts\)$/, [[:packets, 'RTT: updated'], [:packets, 'RTT: attempts to update']]]
+]),
+
+Graph.new('connections', protocol, [
+[/(\d+) connection requests$/, [[:connections, 'requests']]],
+[/(\d+) connection accepts$/, [[:connections, 'accepts']]],
+[/(\d+) bad connection attempts$/, [[:connections, 'bad attempts']]],
+[/(\d+) listen queue overflows$/, [[:connections, 'listen queue overflows']]],
+[/(\d+) connections established \(including accepts\)$/, [[:connections, 'established']]],
+[/(\d+) connections closed \(including (\d+) drops\)$/, [[:connections, 'closed'], [:connections, 'dropped']]],
+[/(\d+) connections updated cached RTT on close$/, [[:connections, 'closed & upd cached RTT']]],
+[/(\d+) connections updated cached RTT variance on close$/, [[:connections, 'closed & upd cached RTT variance']]],
+[/(\d+) connections updated cached ssthresh on close$/, [[:connections, 'closed & upd cached ssthresh']]],
+[/(\d+) embryonic connections dropped$/, [[:connections, 'embryonic dropped']]]
+]),
+
+Graph.new('timeouts', protocol, [
+[/(\d+) retransmit timeouts$/, [[:connections, 'retransmit']]],
+[/(\d+) connections dropped by rexmit timeout$/, [[:connections, 'retransmit: dropped']]],
+[/(\d+) persist timeouts$/, [[:connections, 'persist']]],
+[/(\d+) connections dropped by persist timeout$/, [[:connections, 'persist: dropped']]],
+[/(\d+) Connections \(fin_wait_2\) dropped because of timeout$/, [[:connections, 'fin_wait_2: dropped']]],
+[/(\d+) keepalive timeouts$/, [[:connections, 'keepalive']]],
+[/(\d+) keepalive probes sent$/, [[:connections, 'keepalive: probes sent']]],
+[/(\d+) connections dropped by keepalive$/, [[:connections, 'keepalive: dropped']]]
+]),
+
+Graph.new('correct predictions', protocol, [
+[/(\d+) correct ACK header predictions$/, [[:predictions, 'ACK header']]],
+[/(\d+) correct data packet header predictions$/, [[:predictions, 'data packet header']]]
+]),
+
+Graph.new('SYN', protocol, [
+[/(\d+) syncache entries added$/, [[:entries, 'cache added']]],
+[/(\d+) cookies sent$/, [[:entries, 'cookies sent']]],
+[/(\d+) cookies received$/, [[:entries, 'cookies received']]],
+[/(\d+) retransmitted$/, [[:entries, 'retransmitted']]],
+[/(\d+) dupsyn$/, [[:entries, 'duplicates']]],
+[/(\d+) dropped$/, [[:entries, 'dropped']]],
+[/(\d+) completed$/, [[:entries, 'completed']]],
+[/(\d+) bucket overflow$/, [[:entries, 'bucket overflow']]],
+[/(\d+) cache overflow$/, [[:entries, 'cache overflow']]],
+[/(\d+) reset$/, [[:entries, 'reset']]],
+[/(\d+) stale$/, [[:entries, 'stale']]],
+[/(\d+) aborted$/, [[:entries, 'aborted']]],
+[/(\d+) badack$/, [[:entries, 'bad ACK']]],
+[/(\d+) unreach$/, [[:entries, 'unreachable']]],
+[/(\d+) zone failures$/, [[:entries, 'zone failures']]],
+[/(\d+) hostcache entries added$/, [[:entries, 'hostcache added']]],
+[/(\d+) bucket overflow$/, [[:entries, 'hostcache overflow']]]
+]),
+
+Graph.new('SACK', protocol, [
+[/(\d+) SACK recovery episodes$/, [[:packets, 'recovery episodes']]],
+[/(\d+) segment rexmits in SACK recovery episodes$/, [[:packets, 'segment rexmits']]],
+[/(\d+) byte rexmits in SACK recovery episodes$/, [[:bytes, 'bytes rexmitted']]],
+[/(\d+) SACK options \(SACK blocks\) received$/, [[:packets, 'options blocks rcvd']]],
+[/(\d+) SACK options \(SACK blocks\) sent$/, [[:packets, 'options blocks sent']]],
+[/(\d+) SACK scoreboard overflow$/, [[:packets, 'scoreboard overflow']]]
+]),
+
+Graph.new('ECN', protocol, [
+[/(\d+) packets with ECN CE bit set$/, [[:packets, 'CE bit']]],
+[/(\d+) packets with ECN ECT\(0\) bit set$/, [[:packets, 'ECT(0) bit']]],
+[/(\d+) packets with ECN ECT\(1\) bit set$/, [[:packets, 'ECT(1) bit']]],
+[/(\d+) successful ECN handshakes$/, [[:packets, 'successful handshakes']]],
+[/(\d+) times ECN reduced the congestion window$/, [[:packets, 'congestion window reduced']]]
+])
+]
+when 'udp'
+$os == :linux ? [] : [
+Graph.new('received', protocol, [
+[/(\d+) datagrams received$/, [[:packets, 'total']]],
+[/(\d+) with incomplete header$/, [[:packets, 'incomplete header']]],
+[/(\d+) with bad data length field$/, [[:packets, 'bad data length field']]],
+[/(\d+) with bad checksum$/, [[:packets, 'bad checksum']]],
+[/(\d+) with no checksum$/, [[:packets, 'no checksum']]],
+[/(\d+) dropped due to no socket$/, [[:packets, 'dropped: no socket']]],
+[/(\d+) broadcast\/multicast datagrams undelivered$/, [[:packets, '*cast undelivered']]],
+[/(\d+) dropped due to full socket buffers$/, [[:packets, 'dropped: no buffers']]],
+[/(\d+) not for hashed pcb$/, [[:packets, 'not for hashed pcb']]],
+[/(\d+) delivered$/, [[:packets, 'delivered']]]
+]),
+
+Graph.new('sent', protocol, [
+[/(\d+) datagrams output$/, [[:packets, 'total']]],
+[/(\d+) times multicast source filter matched$/, [[:packets, 'multicast src filter match']]]
+])
+]
+when 'ip'
+$os == :linux ? [] : [
+Graph.new('received', protocol, [
+[/(\d+) total packets received$/, [[:packets, 'total']]],
+[/(\d+) bad header checksums$/, [[:packets, 'bad header checksum']]],
+[/(\d+) with size smaller than minimum$/, [[:packets, 'size smaller than min']]],
+[/(\d+) with data size < data length$/, [[:packets, 'data size < data length']]],
+[/(\d+) with ip length > max ip packet size$/, [[:packets, 'ip length > max ip packet sz']]],
+[/(\d+) with header length < data size$/, [[:packets, 'header length < data size']]],
+[/(\d+) with data length < header length$/, [[:packets, 'data length < header length']]],
+[/(\d+) with bad options$/, [[:packets, 'bad options']]],
+[/(\d+) with incorrect version number$/, [[:packets, 'incorrect version']]],
+[/(\d+) fragments? received$/, [[:packets, 'fragments']]],
+[/(\d+) fragments? dropped \(dup or out of space\)$/, [[:packets, 'frags dropped: dup/out of spc']]],
+[/(\d+) fragments? dropped after timeout$/, [[:packets, 'frags dropped: timeout']]],
+[/(\d+) packets? reassembled ok$/, [[:packets, 'reassembled ok']]],
+[/(\d+) packets? for this host$/, [[:packets, 'for this host']]],
+[/(\d+) packets? for unknown\/unsupported protocol$/, [[:packets, 'for unknown/unsup protocol']]],
+[/(\d+) packets? forwarded \((\d+) packets fast forwarded\)$/, [[:packets, 'forwarded'], [:packets, 'fast forwarded']]],
+[/(\d+) packets? not forwardable$/, [[:packets, 'not forwardable']]],
+[/(\d+) packets? received for unknown multicast group$/, [[:packets, 'unknown multicast grp']]]
+]),
+
+Graph.new('sent', protocol, [
+[/(\d+) packets? sent from this host$/, [[:packets, 'total']]],
+[/(\d+) redirects? sent$/, [[:packets, 'redirect']]],
+[/(\d+) packets? sent with fabricated ip header$/, [[:packets, 'fabricated IP head']]],
+[/(\d+) output packets? dropped due to no bufs, etc\.$/, [[:packets, 'dropped: no bufs, etc']]],
+[/(\d+) output packets? discarded due to no route$/, [[:packets, 'discarded: no route']]],
+[/(\d+) output datagrams? fragmented$/, [[:packets, 'fragmented']]],
+[/(\d+) fragments? created$/, [[:packets, 'fragments created']]],
+[/(\d+) datagrams? that can't be fragmented$/, [[:packets, "can't be fragmented"]]],
+[/(\d+) tunneling packets? that can't find gif$/, [[:packets, 'tunneling, gif not found']]],
+[/(\d+) datagrams? with bad address in header$/, [[:packets, 'bad address in header']]]
+])
+]
+when 'arp'
+$os == :linux ? [] : [
+Graph.new('packets', protocol, [
+# This is just a total, so ignore the value but keep regexp to avoid 'not parsed' warning.
+[/(\d+) ARP packets? received$/],
+[/(\d+) ARP requests? received$/, [[:packets, 'requests received']]],
+[/(\d+) ARP repl(?:y|ies) received$/, [[:packets, 'replies received']]],
+[/(\d+) ARP requests? sent$/, [[:packets, 'requests', 'requests received']]],
+[/(\d+) ARP repl(?:y|ies) sent$/, [[:packets, 'replies', 'replies received']]],
+[/(\d+) total packets? dropped due to no ARP entry$/, [[:packets, 'no entry']]]
+]),
+
+Graph.new('entries', protocol, [
+[/(\d+) ARP entrys? timed out$/, [[:entries, 'timed out']]],
+[/(\d+) Duplicate IPs seen$/, [[:entries, 'duplicate IPs seen']]]
+])
+]
+end
 end

 proto_name = File.basename($0, '.*').escape
...
 proto_name = 'tcp' if proto_name.empty?

 def netstat_s(protocol)
-if $os == :linux
-%w(tcp udp).include?(protocol) ?
-`netstat -s --#{protocol}` :
-`netstat -s --raw`
-else
-`netstat -sp #{protocol}`
-end.lines.reject { |line| line =~ /^\w+:/ }
+if $os == :linux
+%w(tcp udp).include?(protocol) ?
+`netstat -s --#{protocol}` :
+`netstat -s --raw`
+else
+`netstat -sp #{protocol}`
+end.lines.reject { |line| line =~ /^\w+:/ }
 end

 case ARGV.first
 when 'autoconf'
-puts [:linux, :freebsd].include?($os) ? 'yes' : 'no'
+puts [:linux, :freebsd].include?($os) ? 'yes' : 'no'
 when 'suggest'
-puts $os == :linux ? %w(tcp) : %w(tcp udp ip arp)
+puts $os == :linux ? %w(tcp) : %w(tcp udp ip arp)
 when 'config'
-graphs_for(proto_name).each { |graph|
-puts graph.config.join $/
-}
+graphs_for(proto_name).each { |graph|
+puts graph.config.join $/
+}
 else
-data = netstat_s(proto_name)
-graphs_for(proto_name).each { |graph|
-puts graph.fetch(data).join $/
-}
+data = netstat_s(proto_name)
+graphs_for(proto_name).each { |graph|
+puts graph.fetch(data).join $/
+}

-warn "not parsed:\n#{data.join}" unless data.empty? if $debug_mode
+warn "not parsed:\n#{data.join}" unless data.empty? if $debug_mode
 end

 # awful performance when scrolling through those regexps above
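
For readers skimming the diff: each parse table above pairs a regexp with one [unit, label, optional negative-label] triple per capture group, and Graph#fetch turns a matching netstat line into munin field values. The following standalone Ruby sketch illustrates that mapping in isolation; it is not part of the plugin, and the constant names and sample lines are made up for the example:

# Minimal sketch of the parse-table idea used by the plugin above.
# SAMPLE_TABLE and SAMPLE_LINES are hypothetical, for illustration only.
SAMPLE_TABLE = [
  [/(\d+) segments received$/, [[:segments, 'total']]],
  [/(\d+) segments retransmitted$/, [[:segments, 'retransmitted']]]
].freeze

SAMPLE_LINES = [
  '    123456 segments received',
  '       789 segments retransmitted'
].freeze

SAMPLE_TABLE.each do |expr, descr|
  next unless SAMPLE_LINES.find { |line| line =~ expr }
  # $~ holds the last successful match; one munin field per capture group.
  $~[1..-1].zip(descr).each do |value, (unit, label)|
    puts "#{label.gsub(/[^\w]/, '_')}.value #{value}  # unit: #{unit}"
  end
end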
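Usage note (summarizing the code in the diff, unchanged by this commit): as a wildcard plugin the script takes its protocol from the filename suffix via File.basename($0, '.*'), falling back to 'tcp'; it answers munin's autoconf, suggest and config commands and otherwise prints fetched values, and running it with 'debug' as the first argument additionally reports netstat lines that none of the regexps matched.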