#!/bin/bash

: <<=cut

=head1 NAME

emc_vnx_file_stats - Plugin to monitor Basic, NFSv3 and NFSv4 statistics of
EMC VNX 5300 Unified Storage system's Datamovers

=head1 AUTHOR

Evgeny Beysembaev <megabotva@gmail.com>

=head1 LICENSE

GPLv2

=head1 MAGIC MARKERS

  #%# family=auto
  #%# capabilities=autoconf suggest

=head1 DESCRIPTION

The plugin monitors basic statistics of EMC Unified Storage system Datamovers
and NFS statistics of the EMC VNX5300 Unified Storage system. It can probably
also work with other Isilon or Celerra systems. It uses SSH to connect to the
Control Stations, then remotely executes '/nas/sbin/server_stats' and fetches
and parses its output. It supports gathering data from both active/active and
active/passive Datamover configurations, ignoring offline or standby
Datamovers. If all Datamovers are offline or absent, the plugin returns an
error. The plugin also automatically chooses the Primary Control Station from
the list by calling '/nasmcd/sbin/getreason' and '/nasmcd/sbin/t2slot'.

At the moment, data is gathered from the following statistics sources:

 * nfs.v3.op - tons of timings about NFSv3 RPC calls
 * nfs.v4.op - tons of timings about NFSv4 RPC calls
 * nfs.client - new client addresses are rescanned and added automatically
 * basic-std Statistics Group - basic statistics of the Datamovers
   (e.g. CPU, Memory etc.)

It is quite easy to comment out unneeded data sources to make the graphs less
crowded, or to add new ones.

The plugin has been tested in the following Operating Environment (OE):

 File Version   T7.1.76.4
 Block Revision 05.32.000.5.215

=head1 LIST OF GRAPHS

These are the Basic Datamover graphs.

 Graph category CPU:
  EMC VNX 5300 Datamover CPU Util %
 Graph category Network:
  EMC VNX 5300 Datamover Network bytes over all interfaces
  EMC VNX 5300 Datamover Storage bytes over all interfaces
 Graph category Memory:
  EMC VNX 5300 Datamover Memory
  EMC VNX 5300 File Buffer Cache
  EMC VNX 5300 FileResolve

These are the NFS (v3, v4) graphs.

 Graph category NFS:
  EMC VNX 5300 NFSv3 Calls per second
  EMC VNX 5300 NFSv3 uSeconds per call
  EMC VNX 5300 NFSv3 Op %
  EMC VNX 5300 NFSv4 Calls per second
  EMC VNX 5300 NFSv4 uSeconds per call
  EMC VNX 5300 NFSv4 Op %
  EMC VNX 5300 NFS Client Ops/s
  EMC VNX 5300 NFS Client B/s
  EMC VNX 5300 NFS Client Avg uSec/call
  EMC VNX 5300 Std NFS Ops/s
  EMC VNX 5300 Std NFS B/s
  EMC VNX 5300 Std NFS Average Size Bytes
  EMC VNX 5300 Std NFS Active Threads

=head1 COMPATIBILITY

The plugin has been written for, and tested against, the EMC VNX5300 Storage
system, as this is the only EMC storage system I have. I am pretty sure it
also works with other VNX1 storages, such as the VNX5100 and VNX5500.
Whether the plugin can work with the VNX2 series I do not know; it may need
some corrections in the command-line backend. The same applies to other EMC
systems, so I encourage you to try it and to fix the plugin where needed.

=head1 CONFIGURATION

The plugin uses SSH to connect to the Control Stations. It is possible to use
the 'nasadmin' user, but it is better to create a read-only global user with
the Unisphere Client; the user should have only the Operator role.
I created an "operator" user, but since the Control Stations already had an
internal "operator" user, the new one was named "operator1" - so be careful.
After that, copy .bash_profile from /home/nasadmin to the newly created
/home/operator1.

On the munin-node side, choose the user which will be used to connect through
SSH. Generally the "munin" user is fine. Then execute
"sudo su munin -s /bin/bash" and "ssh-keygen", and run "ssh-copy-id" against
both Control Stations as the newly created user.
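
For example, with the remote user and the Control Station addresses from the
sample configuration below:

  sudo su munin -s /bin/bash
  ssh-keygen
  ssh-copy-id operator1@192.168.1.1
  ssh-copy-id operator1@192.168.1.2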

Make a symlink from /usr/share/munin/plugins/emc_vnx_file_stats into
/etc/munin/plugins/. To get NFS statistics, name the link
"emc_vnx_file_nfs_stats_<NAME>"; to get Basic Datamover statistics, name it
"emc_vnx_file_basicdm_stats_<NAME>", where <NAME> is an arbitrary name for
your storage system. The plugin reports <NAME> back in its output as the
"host_name" field.
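
For example, if your storage system is called "VNX5300", the following links
enable both kinds of statistics:

  ln -s /usr/share/munin/plugins/emc_vnx_file_stats \
      /etc/munin/plugins/emc_vnx_file_nfs_stats_VNX5300
  ln -s /usr/share/munin/plugins/emc_vnx_file_stats \
      /etc/munin/plugins/emc_vnx_file_basicdm_stats_VNX5300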

Then create a configuration file at
/etc/munin/plugin-conf.d/emc_vnx_file_stats_VNX5300:

  [emc_vnx_file_*]
  user munin
  env.username operator1
  env.cs_addr 192.168.1.1 192.168.1.2
  env.nas_servers server_2 server_3

Where:
  user            - local user the SSH client runs as
  env.username    - remote user with the Operator role
  env.cs_addr     - Control Station addresses
  env.nas_servers - Datamovers to poll; "server_2 server_3" is the default
                    and can be omitted
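
You can then check the plugin manually with munin-run, for example:

  munin-run emc_vnx_file_nfs_stats_VNX5300 config
  munin-run emc_vnx_file_nfs_stats_VNX5300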
| 127 |
|
| 128 |
=head1 HISTORY |
| 129 |
|
| 130 |
08.11.2016 - First Release |
| 131 |
17.11.2016 - NFSv4 support, Memory section |
| 132 |
16.12.2016 - Merged "NFS" and "Datamover Stats" plugins |
| 133 |
26.12.2016 - Compatibility with Munin coding style |
| 134 |
|
| 135 |
=cut |
| 136 |
|
| 137 |
export LANG=C |
| 138 |
|
| 139 |
. "$MUNIN_LIBDIR/plugins/plugin.sh" |
| 140 |
|
nas_server_ok=""
cs_addr=${cs_addr:=""}
username=${username:=""}
nas_servers=${nas_servers:="server_2 server_3"}

# Prints "10" on stdout if it finds a Primary Online Control Station,
# "11" for a Secondary Online Control Station.
ssh_check_cmd() {
	ssh -q "$username@$1" "/nasmcd/sbin/getreason | grep -w \"slot_\$(/nasmcd/sbin/t2slot)\" | cut -d- -f1 | awk '{print \$1}' "
}

check_conf () {
	if [ -z "$username" ]; then
		echo "No username ('username' environment variable)!"
		return 1
	fi

	if [ -z "$cs_addr" ]; then
		echo "No control station addresses ('cs_addr' environment variable)!"
		return 1
	fi

	# Choose the Control Station. The reason code has to be "10".
	for CS in $cs_addr; do
		# shellcheck disable=SC2086
		if [[ "10" = "$(ssh_check_cmd $CS)" ]]; then
			PRIMARY_CS=$CS
			break
		fi
	done

	if [ -z "$PRIMARY_CS" ]; then
		echo "No alive primary Control Station in the list \"$cs_addr\""
		return 1
	fi
	return 0
}

if [ "$1" = "autoconf" ]; then
	if check_conf_ans=$(check_conf); then
		echo "yes"
	else
		echo "no ($check_conf_ans)"
	fi
	exit 0
fi

if [ "$1" = "suggest" ]; then
	echo "nfs_stats"
	echo "basicdm_stats"
	exit 0
fi

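# The statistics type and the reported host name are derived from the
# plugin's (symlink) file name: underscore-separated fields 1-5 select the
# mode, field 6 is the storage system name.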
STATSTYPE=$(echo "${0##*/}" | cut -d _ -f 1-5)
if [ "$STATSTYPE" = "emc_vnx_file_nfs_stats" ]; then STATSTYPE=NFS;
elif [ "$STATSTYPE" = "emc_vnx_file_basicdm_stats" ]; then STATSTYPE=BASICDM;
else echo "Do not know what to do. Name the plugin as 'emc_vnx_file_nfs_stats_<HOSTNAME>' or 'emc_vnx_file_basicdm_stats_<HOSTNAME>'" >&2; exit 1; fi

TARGET=$(echo "${0##*/}" | cut -d _ -f 6)

check_conf 1>&2 || exit 1

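# Runs a command on the Primary Control Station. The remote user's
# .bash_profile is sourced first so that the NAS CLI tools are in the PATH.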
run_remote () {
	# shellcheck disable=SC2029
	ssh -q "$username@$PRIMARY_CS" ". /home/$username/.bash_profile; $*"
}

echo "host_name ${TARGET}"

if [ "$1" = "config" ] ; then
	# TODO: active/active
	for server in $nas_servers; do
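		# Skip Datamovers that are not active NAS servers (offline or standby).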
		run_remote nas_server -i "$server" | grep -q 'type *= nas' || continue
		nas_server_ok=TRUE
		filtered_server="$(clean_fieldname "$server")"

		if [ "$STATSTYPE" = "BASICDM" ] ; then
			cat <<-EOF
				multigraph emc_vnx_cpu_percent
				graph_title EMC VNX 5300 Datamover CPU Util %
				graph_vlabel %
				graph_category cpu
				graph_scale no
				graph_args --upper-limit 100 -l 0
				${server}_cpuutil.min 0
				${server}_cpuutil.label $server CPU util. in %.

				multigraph emc_vnx_network_b
				graph_title EMC VNX 5300 Datamover Network bytes over all interfaces
				graph_vlabel B/s recv. (-) / sent (+)
				graph_category network
				graph_args --base 1000
				${server}_net_in.graph no
				${server}_net_in.label none
				${server}_net_out.label $server B/s
				${server}_net_out.negative ${server}_net_in
				${server}_net_out.draw AREA

				multigraph emc_vnx_storage_b
				graph_title EMC VNX 5300 Datamover Storage bytes over all interfaces
				graph_vlabel B/s recv. (-) / sent (+)
				graph_category network
				graph_args --base 1000
				${server}_stor_read.graph no
				${server}_stor_read.label none
				${server}_stor_write.label $server B/s
				${server}_stor_write.negative ${server}_stor_read
				${server}_stor_write.draw AREA

				multigraph emc_vnx_memory
				graph_title EMC VNX 5300 Datamover Memory
				graph_vlabel KiB
				graph_category memory
				graph_args --base 1024
				graph_order ${server}_used ${server}_free ${server}_total ${server}_freebuffer ${server}_encumbered
				${server}_used.label ${server} Used
				${server}_free.label ${server} Free
				${server}_free.draw STACK
				${server}_total.label ${server} Total
				${server}_freebuffer.label ${server} Free Buffer
				${server}_encumbered.label ${server} Encumbered

				multigraph emc_vnx_filecache
				graph_title EMC VNX 5300 File Buffer Cache
				graph_vlabel per second
				graph_category memory
				graph_args --base 1000
				graph_order ${server}_highw_hits ${server}_loww_hits ${server}_w_hits ${server}_hits ${server}_lookups
				${server}_highw_hits.label High Watermark Hits
				${server}_loww_hits.label Low Watermark Hits
				${server}_loww_hits.draw STACK
				${server}_w_hits.label Watermark Hits
				${server}_hits.label Hits
				${server}_lookups.label Lookups

				multigraph emc_vnx_fileresolve
				graph_title EMC VNX 5300 FileResolve
				graph_vlabel Entries
				graph_category memory
				graph_args --base 1000
				${server}_dropped.label Dropped Entries
				${server}_max.label Max Limit
				${server}_used.label Used Entries
			EOF
		fi
		if [ "$STATSTYPE" = "NFS" ] ; then
			# nfs.v3.op data
			# [nasadmin@mnemonic0 ~]$ server_stats server_2 -info nfs.v3.op
			# server_2 :
			#
			#    name            = nfs.v3.op
			#    description     = NFS V3 per operation statistics
			#    type            = Set
			#    member_stats    = nfs.v3.op.ALL-ELEMENTS.calls,nfs.v3.op.ALL-ELEMENTS.failures,nfs.v3.op.ALL-ELEMENTS.avgTime,nfs.v3.op.ALL-ELEMENTS.opPct
			#    member_elements = nfs.v3.op.v3Null,nfs.v3.op.v3GetAttr,nfs.v3.op.v3SetAttr,nfs.v3.op.v3Lookup,nfs.v3.op.v3Access,nfs.v3.op.v3ReadLink,nfs.v3.op.v3Read,nfs.v3.op.v3Write,nfs.v3.op.v3Create,nfs.v3.op.v3Mkdir,nfs.v3.op.v3Symlink,nfs.v3.op.v3Mknod,nfs.v3.op.v3Remove,nfs.v3.op.v3Rmdir,nfs.v3.op.v3Rename,nfs.v3.op.v3Link,nfs.v3.op.v3ReadDir,nfs.v3.op.v3ReadDirPlus,nfs.v3.op.v3FsStat,nfs.v3.op.v3FsInfo,nfs.v3.op.v3PathConf,nfs.v3.op.v3Commit,nfs.v3.op.VAAI
			#    member_of       = nfs.v3
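			# Take the comma-separated member_elements list and split it into
			# an array; the last dot-separated component of each element
			# (e.g. "v3GetAttr") becomes the Munin field name.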
			member_elements_by_line=$(run_remote server_stats "$server" -info nfs.v3.op | grep member_elements | sed -ne 's/^.*= //p')
			IFS=',' read -ra graphs <<< "$member_elements_by_line"
			cat <<-EOF
				multigraph vnx_emc_v3_calls_s
				graph_title EMC VNX 5300 NFSv3 Calls per second
				graph_vlabel Calls
				graph_category nfs
				graph_args --base 1000
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
			done

			cat <<-EOF

				multigraph vnx_emc_v3_usec_call
				graph_title EMC VNX 5300 NFSv3 uSeconds per call
				graph_vlabel uSec / call
				graph_category nfs
				graph_args --base 1000
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
			done
			cat <<-EOF

				multigraph vnx_emc_v3_op_percent
				graph_title EMC VNX 5300 NFSv3 Op %
				graph_vlabel %
				graph_scale no
				graph_category nfs
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
				echo "${server}_$field.min 0"
			done
			graphs=()

			# nfs.v4.op data
			member_elements_by_line=$(run_remote server_stats "$server" -info nfs.v4.op | grep member_elements | sed -ne 's/^.*= //p')
			IFS=',' read -ra graphs <<< "$member_elements_by_line"
			cat <<-EOF
				multigraph vnx_emc_v4_calls_s
				graph_title EMC VNX 5300 NFSv4 Calls per second
				graph_vlabel Calls
				graph_category nfs
				graph_args --base 1000
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
			done

			cat <<-EOF

				multigraph vnx_emc_v4_usec_call
				graph_title EMC VNX 5300 NFSv4 uSeconds per call
				graph_vlabel uSec / call
				graph_category nfs
				graph_args --base 1000
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
			done
			cat <<-EOF

				multigraph vnx_emc_v4_op_percent
				graph_title EMC VNX 5300 NFSv4 Op %
				graph_vlabel %
				graph_scale no
				graph_category nfs
			EOF
			for graph in "${graphs[@]}"; do
				field=$(echo "$graph" | cut -d '.' -f4)
				echo "${server}_$field.label $server $field"
				echo "${server}_$field.min 0"
			done

			# nfs.client data; the columns are:
			#   Total Ops/s, Read Ops/s, Write Ops/s, Suspicious Ops diff,
			#   Total KiB/s, Read KiB/s, Write KiB/s, Avg uSec/call
			member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs.client -count 1 -terminationsummary no -titles never | sed -ne 's/^.*id=//p' | cut -d' ' -f1)
			# For some reason readarray adds an extra \n at the end of each
			# element, so we use read with a workaround.
			IFS=$'\n' read -rd '' -a graphs_array <<< "$member_elements_by_line"
			cat <<-EOF

				multigraph vnx_emc_nfs_client_ops_s
				graph_title EMC VNX 5300 NFS Client Ops/s
				graph_vlabel Ops/s
				graph_category nfs
			EOF
			echo -n "graph_order "
			for graph in "${graphs_array[@]}"; do
				field="$(clean_fieldname "_$graph")"
				echo -n "${server}${field}_r ${server}${field}_w ${server}${field}_t ${server}${field}_s "
			done
			echo " "
			for graph in "${graphs_array[@]}"; do
				field="$(clean_fieldname "_$graph")"
				echo "${server}${field}_r.label $server $graph Read Ops/s"
				echo "${server}${field}_w.label $server $graph Write Ops/s"
				echo "${server}${field}_w.draw STACK"
				echo "${server}${field}_t.label $server $graph Total Ops/s"
				echo "${server}${field}_s.label $server $graph Suspicious Ops diff"
			done

			cat <<-EOF

				multigraph vnx_emc_nfs_client_b_s
				graph_title EMC VNX 5300 NFS Client B/s
				graph_vlabel B/s
				graph_category nfs
			EOF
			echo -n "graph_order "
			for graph in "${graphs_array[@]}"; do
				field="$(clean_fieldname "_$graph")"
				echo -n "${server}${field}_r ${server}${field}_w ${server}${field}_t "
			done
			echo " "
			for graph in "${graphs_array[@]}"; do
				field="$(clean_fieldname "_$graph")"
				echo "${server}${field}_r.label $server $graph Read B/s"
				echo "${server}${field}_w.label $server $graph Write B/s"
				echo "${server}${field}_w.draw STACK"
				echo "${server}${field}_t.label $server $graph Total B/s"
			done

			cat <<-EOF

				multigraph vnx_emc_nfs_client_avg_usec
				graph_title EMC VNX 5300 NFS Client Avg uSec/call
				graph_vlabel uSec/call
				graph_category nfs
			EOF
			for graph in "${graphs_array[@]}"; do
				field="$(clean_fieldname "_$graph")"
				echo "${server}${field}.label $server $graph Avg uSec/call"
			done

			# nfs-std data; columns after the timestamp: Total NFS Ops/s,
			# Read Ops/s, Read KiB/s, Avg Read Size Bytes, Write Ops/s,
			# Write KiB/s, Avg Write Size Bytes, Active Threads
			cat <<-EOF

				multigraph vnx_emc_nfs_std_nfs_ops
				graph_title EMC VNX 5300 Std NFS Ops/s
				graph_vlabel Ops/s
				graph_category nfs
			EOF
			echo "graph_order ${filtered_server}_rops ${filtered_server}_wops ${filtered_server}_tops"
			echo "${filtered_server}_rops.label $server Read Ops/s"
			echo "${filtered_server}_wops.label $server Write Ops/s"
			echo "${filtered_server}_wops.draw STACK"
			echo "${filtered_server}_tops.label $server Total Ops/s"

			cat <<-EOF

				multigraph vnx_emc_nfs_std_nfs_b_s
				graph_title EMC VNX 5300 Std NFS B/s
				graph_vlabel B/s
				graph_category nfs
			EOF
			echo "graph_order ${filtered_server}_rbs ${filtered_server}_wbs ${filtered_server}_tbs"
			echo "${filtered_server}_rbs.label $server Read B/s"
			echo "${filtered_server}_wbs.label $server Write B/s"
			echo "${filtered_server}_wbs.draw STACK"
			echo "${filtered_server}_tbs.label $server Total B/s"
			echo "${filtered_server}_tbs.cdef ${filtered_server}_rbs,${filtered_server}_wbs,+"

			cat <<-EOF

				multigraph vnx_emc_nfs_std_nfs_avg
				graph_title EMC VNX 5300 Std NFS Average Size Bytes
				graph_vlabel Bytes
				graph_category nfs
			EOF
			echo "${filtered_server}_avg_readsize.label $server Average Read Size Bytes"
			echo "${filtered_server}_avg_writesize.label $server Average Write Size Bytes"

			cat <<-EOF

				multigraph vnx_emc_nfs_std_nfs_threads
				graph_title EMC VNX 5300 Std NFS Active Threads
				graph_vlabel Threads
				graph_category nfs
			EOF
			echo "${filtered_server}_threads.label $server Active Threads"
		fi
	done
	if [ -z "$nas_server_ok" ]; then
		echo "No active data movers!" 1>&2
	fi
	exit 0
fi
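
# Not "config": fetch and print the current values.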
for server in $nas_servers; do
	run_remote nas_server -i "$server" | grep -q 'type *= nas' || continue
	nas_server_ok=TRUE
	filtered_server="$(clean_fieldname "$server")"

	if [ "$STATSTYPE" = "BASICDM" ] ; then
		# basicdm data
		# [nasadmin@mnemonic0 ~]$ server_stats server_2 -count 1 -terminationsummary no
		# server_2      CPU    Network    Network     dVol     dVol
		# Timestamp    Util         In        Out     Read    Write
		#                 %      KiB/s      KiB/s    KiB/s    KiB/s
		# 20:42:26        9      16432       3404     1967    24889

		member_elements_by_line=$(run_remote server_stats "$server" -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]')
		IFS=$' ' read -ra graphs <<< "$member_elements_by_line"

		echo "multigraph emc_vnx_cpu_percent"
		echo "${server}_cpuutil.value ${graphs[1]}"

		echo -e "\nmultigraph emc_vnx_network_b"
		echo "${server}_net_in.value $((graphs[2] * 1024))"
		echo "${server}_net_out.value $((graphs[3] * 1024))"

		echo -e "\nmultigraph emc_vnx_storage_b"
		echo "${server}_stor_read.value $((graphs[4] * 1024))"
		echo "${server}_stor_write.value $((graphs[5] * 1024))"

		# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor kernel.memory -count 1 -terminationsummary no
		# Columns after the timestamp:
		#  1 Free Buffer Cache KiB           2 Buffer Cache High Watermark Hits/s
		#  3 Buffer Cache Hit %              4 Buffer Cache Hits/s
		#  5 Buffer Cache Lookups/s          6 Buffer Cache Low Watermark Hits/s
		#  7 Buffer Cache Watermark Hits/s   8 Encumbered Memory KiB
		#  9 FileResolve Dropped Entries    10 FileResolve Max Limit
		# 11 FileResolve Used Entries       12 Free KiB
		# 13 Page Size KiB                  14 Total Memory KiB
		# 15 Used KiB                       16 Memory Util %
		# 20:44:14 3522944 0 96 11562 12010 0 0 3579268 0 0 0 3525848 8 6291456 2765608 44

		member_elements_by_line=$(run_remote server_stats "$server" -monitor kernel.memory -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]')
		IFS=$' ' read -ra graphs <<< "$member_elements_by_line"

		echo -e "\nmultigraph emc_vnx_memory"
		# The division by 1 is reserved for unit math.
		echo "${server}_total.value $((graphs[14] / 1))"
		echo "${server}_used.value $((graphs[15] / 1))"
		echo "${server}_free.value $((graphs[12] / 1))"
		echo "${server}_freebuffer.value $((graphs[1] / 1))"
		echo "${server}_encumbered.value $((graphs[8] / 1))"

		echo -e "\nmultigraph emc_vnx_filecache"
		echo "${server}_highw_hits.value ${graphs[2]}"
		echo "${server}_loww_hits.value ${graphs[6]}"
		echo "${server}_w_hits.value ${graphs[7]}"
		echo "${server}_hits.value ${graphs[4]}"
		echo "${server}_lookups.value ${graphs[5]}"

		echo -e "\nmultigraph emc_vnx_fileresolve"
		echo "${server}_dropped.value ${graphs[9]}"
		echo "${server}_max.value ${graphs[10]}"
		echo "${server}_used.value ${graphs[11]}"
	fi
	if [ "$STATSTYPE" = "NFS" ] ; then
		# nfs.v3.op data
		# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.v3.op -count 1 -terminationsummary no
		# server_2                NFS Op    NFS Op        NFS Op      NFS Op
		# Timestamp    Op         Calls/s   Errors diff   uSec/Call   %
		# 22:14:41     v3GetAttr     30        0             23       21
		#              v3Lookup      40        0          98070       27
		#              v3Access      50        0             20       34
		#              v3Read         4        0          11180        3
		#              v3Write        2        0           2334        1
		#              v3Create       1        0           1743        1
		#              v3Mkdir       13        0            953        9
		#              v3Link         6        0           1064        4

		member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs.v3.op -count 1 -terminationsummary no -titles never | sed -ne 's/^.*v3/v3/p')
		NUMCOL=5
		LINES=$(wc -l <<< "$member_elements_by_line")
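		# Flatten the table into a one-dimensional array:
		# row i, column j ends up at elements_array[i*NUMCOL+j].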
		while IFS=$'\n' read -ra graphs ; do
			elements_array+=( $graphs )
		done <<< "$member_elements_by_line"

		if [ "${#elements_array[@]}" -eq "0" ]; then LINES=0; fi

		echo "multigraph vnx_emc_v3_calls_s"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+1]}"
		done

		echo -e "\nmultigraph vnx_emc_v3_usec_call"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+3]}"
		done

		echo -e "\nmultigraph vnx_emc_v3_op_percent"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+4]}"
		done

		elements_array=()

		# nfs.v4.op data
		# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.v4.op -count 1 -terminationsummary no
		# server_2                 NFS Op    NFS Op        NFS Op      NFS Op
		# Timestamp    Op          Calls/s   Errors diff   uSec/Call   %
		# 22:13:14     v4Compound    2315       0           7913       30
		#              v4Access       246       0              5        3
		#              v4Close        133       0             11        2
		#              v4Commit         2       0           6928        0
		#              v4Create         1       0            881        0
		#              v4DelegRet      84       0             19        1
		#              v4GetAttr     1330       0              7       17
		#              v4GetFh        164       0              3        2
		#              v4Lookup        68       0             43        1
		#              v4Open         132       0           1061        2
		#              v4PutFh       2314       0             11       30
		#              v4Read         359       0          15561        5
		#              v4ReadDir        1       0             37        0
		#              v4Remove        62       0           1096        1
		#              v4Rename         1       0            947        0
		#              v4Renew          2       0              3        0
		#              v4SaveFh         1       0              3        0
		#              v4SetAttr        9       0            889        0
		#              v4Write        525       0          16508        7

		member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs.v4.op -count 1 -terminationsummary no -titles never | sed -ne 's/^.*v4/v4/p')
		NUMCOL=5
		LINES=$(wc -l <<< "$member_elements_by_line")
		while IFS=$'\n' read -ra graphs ; do
			elements_array+=( $graphs )
		done <<< "$member_elements_by_line"

		if [ "${#elements_array[@]}" -eq "0" ]; then LINES=0; fi

		echo -e "\nmultigraph vnx_emc_v4_calls_s"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+1]}"
		done

		echo -e "\nmultigraph vnx_emc_v4_usec_call"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+3]}"
		done

		echo -e "\nmultigraph vnx_emc_v4_op_percent"
		for ((i=0; i<LINES; i++)); do
			echo "${server}_${elements_array[i*NUMCOL]}.value ${elements_array[i*NUMCOL+4]}"
		done

		elements_array=()

		# nfs.client data
		# [nasadmin@mnemonic0 ~]$ server_stats server_2 -monitor nfs.client -count 1 -terminationsummary no
		# server_2     Client             NFS     NFS    NFS     NFS          NFS     NFS    NFS     NFS
		# Timestamp                       Total   Read   Write   Suspicious   Total   Read   Write   Avg
		#                                 Ops/s   Ops/s  Ops/s   Ops diff     KiB/s   KiB/s  KiB/s   uSec/call
		# 20:26:38     id=192.168.1.223    2550     20    2196       13        4673    159    4514      1964
		#              id=192.168.1.2       691      4       5        1        1113    425     688      2404
		#              id=192.168.1.1       159      0       0       51           0      0       0      6017
		#              id=192.168.1.6        37      4       2        0         586    295     291      5980
		#              id=192.168.1.235      21      1       0        0           0      0       0    155839
		#              id=192.168.1.224       5      0       5        0          20      0      20    704620

		member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs.client -count 1 -terminationsummary no -titles never | sed -ne 's/^.*id=//p')
		echo -e "\nmultigraph vnx_emc_nfs_client_ops_s"
		NUMCOL=9
		LINES=$(wc -l <<< "$member_elements_by_line")
		while IFS=$'\n' read -ra graphs; do
			elements_array+=($graphs)
		done <<< "$member_elements_by_line"

		# Not drawing elements in case of an empty set
		if [ "${#elements_array[@]}" -eq "0" ]; then LINES=0; fi

		for ((i=0; i<LINES; i++)); do
			client="$(clean_fieldname "_${elements_array[i*NUMCOL]}")"
			echo "${server}${client}_r.value ${elements_array[i*NUMCOL+2]}"
			echo "${server}${client}_w.value ${elements_array[i*NUMCOL+3]}"
			echo "${server}${client}_t.value ${elements_array[i*NUMCOL+1]}"
			echo "${server}${client}_s.value ${elements_array[i*NUMCOL+4]}"
		done
		echo -e "\nmultigraph vnx_emc_nfs_client_b_s"
		for ((i=0; i<LINES; i++)); do
			client="$(clean_fieldname "_${elements_array[i*NUMCOL]}")"
			echo "${server}${client}_r.value $((elements_array[i*NUMCOL+6] * 1024))"
			echo "${server}${client}_w.value $((elements_array[i*NUMCOL+7] * 1024))"
			echo "${server}${client}_t.value $((elements_array[i*NUMCOL+5] * 1024))"
		done
		echo -e "\nmultigraph vnx_emc_nfs_client_avg_usec"
		for ((i=0; i<LINES; i++)); do
			client="$(clean_fieldname "_${elements_array[i*NUMCOL]}")"
			echo "${server}${client}.value ${elements_array[i*NUMCOL+8]}"
		done

		# nfs-std
		# bash-3.2$ server_stats server_2 -monitor nfs-std
		# server_2     Total   NFS     NFS     NFS Avg     NFS     NFS     NFS Avg      NFS
		# Timestamp    NFS     Read    Read    Read Size   Write   Write   Write Size   Active
		#              Ops/s   Ops/s   KiB/s   Bytes       Ops/s   KiB/s   Bytes        Threads
		# 18:14:52       688    105     6396     62652        1     137      174763        3
		member_elements_by_line=$(run_remote server_stats "$server" -monitor nfs-std -count 1 -terminationsummary no -titles never | grep '^[^[:space:]]')
		IFS=$' ' read -ra graphs <<< "$member_elements_by_line"
		# echo "$member_elements_by_line"
		# echo "${graphs[@]}"

		echo -e "\nmultigraph vnx_emc_nfs_std_nfs_ops"
		echo "${filtered_server}_rops.value ${graphs[2]}"
		echo "${filtered_server}_wops.value ${graphs[5]}"
		echo "${filtered_server}_tops.value ${graphs[1]}"

		echo -e "\nmultigraph vnx_emc_nfs_std_nfs_b_s"
		echo "${filtered_server}_rbs.value $((graphs[3] * 1024))"
		echo "${filtered_server}_wbs.value $((graphs[6] * 1024))"
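		# The real total is computed by the cdef registered in "config"
		# (read B/s + write B/s); the value reported here is a placeholder.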
		echo "${filtered_server}_tbs.value 0"

		echo -e "\nmultigraph vnx_emc_nfs_std_nfs_avg"
		echo "${filtered_server}_avg_readsize.value ${graphs[4]}"
		echo "${filtered_server}_avg_writesize.value ${graphs[7]}"

		echo -e "\nmultigraph vnx_emc_nfs_std_nfs_threads"
		echo "${filtered_server}_threads.value ${graphs[8]}"
	fi
done
if [ -z "$nas_server_ok" ]; then
	echo "No active data movers!" 1>&2
fi
exit 0
