#!/usr/bin/env python3
"""
=head1 NAME

docker_ - Docker wildcard-plugin to monitor a L<Docker|https://www.docker.com> host.

This wildcard plugin currently provides only the suffixes C<containers>, C<images>, C<status>,
C<volumes>, C<cpu> and C<memory>.

=head1 INSTALLATION

- Copy this plugin into your Munin plugins directory
- Install the Python 3 "docker" package

=over 2

  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_containers
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_cpu
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_images
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_memory
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_status
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_volumes

=back

After the installation you need to restart your munin-node:

=over 2

  systemctl restart munin-node

=back

=head1 CONFIGURATION

This plugin needs to run as root. Create a file named docker in the directory
/etc/munin/plugin-conf.d/ with the following configuration (you can also use
Docker environment variables here, as described in
https://docs.docker.com/compose/reference/envvars/):

=over 2

  [docker_*]
  user root
  env.DOCKER_HOST unix://var/run/docker.sock

=back
"""

import os
import sys
import docker
from multiprocessing import Process, Queue


def print_containers_status(client):
    running = 0
    paused = 0
    created = 0
    restarting = 0
    removing = 0
    exited = 0
    dead = 0
    # all=True so that stopped containers (created/exited/dead) are counted too;
    # the default listing only returns running containers.
    for container in client.containers.list(all=True):
        if container.status == 'running':
            running += 1
        elif container.status == 'paused':
            paused += 1
        elif container.status == 'created':
            created += 1
        elif container.status == 'restarting':
            restarting += 1
        elif container.status == 'removing':
            removing += 1
        elif container.status == 'exited':
            exited += 1
        elif container.status == 'dead':
            dead += 1
    print('running.value', running)
    print('paused.value', paused)
    print('created.value', created)
    print('restarting.value', restarting)
    print('removing.value', removing)
    print('exited.value', exited)
    print('dead.value', dead)


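# container.stats(stream=False) makes a blocking API call for every container,
# so the two helpers below collect the stats in one subprocess per container
# and hand the results back through per-container Queues instead of querying
# each container one after the other.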
def get_container_stats(container, q):
    q.put(container.stats(stream=False))


def parallel_container_stats(client):
    proc_list = []
    stats = {}
    for container in client.containers.list():
        q = Queue()
        p = Process(target=get_container_stats, args=(container, q))
        proc_list.append({'proc': p, 'queue': q, 'container': container})
        p.start()
    for proc in proc_list:
        proc['proc'].join()
        stats[proc['container']] = proc['queue'].get()
    return stats.items()


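# The CPU figure below follows the same formula "docker stats" uses: the growth
# of the container's cumulative CPU time between the two samples in the stats
# payload (precpu_stats vs. cpu_stats) is divided by the growth of the host's
# total CPU time, then scaled by the number of CPUs and expressed in percent.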
def print_containers_cpu(client):
    for container, stats in parallel_container_stats(client):
        cpu_count = len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
        cpu_percent = 0.0
        cpu_delta = float(stats["cpu_stats"]["cpu_usage"]["total_usage"]) \
            - float(stats["precpu_stats"]["cpu_usage"]["total_usage"])
        system_delta = float(stats["cpu_stats"]["system_cpu_usage"]) \
            - float(stats["precpu_stats"]["system_cpu_usage"])
        if system_delta > 0.0:
            cpu_percent = cpu_delta / system_delta * 100.0 * cpu_count
        print(container.name + '.value', cpu_percent)


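# Memory is reported as the total_rss counter from the Docker stats API
# payload (memory_stats.stats.total_rss), i.e. the container's resident set
# size rather than its full cache footprint.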
def print_containers_memory(client):
    for container, stats in parallel_container_stats(client):
        print(container.name + '.value', stats['memory_stats']['stats']['total_rss'])


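# Munin wildcard plugin protocol: the suffix of the symlink name (everything
# after "docker_") selects which graph to serve, and the optional first
# argument is either "suggest" (list possible suffixes), "config" (print the
# graph definition) or empty (print the current values).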
def main():
    try:
        mode = sys.argv[1]
    except IndexError:
        mode = ""
    wildcard = sys.argv[0].split("docker_")[1].split("_")[0]

    if mode == "suggest":
        print("containers")
        print("cpu")
        print("images")
        print("memory")
        print("status")
        print("volumes")

    client = docker.from_env()

    if wildcard == "status":
        if mode == "config":
            print("graph_title Docker status")
            print("graph_vlabel containers")
            print("graph_category virtualization")
            print("running.label RUNNING")
            print("paused.label PAUSED")
            print("created.label CREATED")
            print("restarting.label RESTARTING")
            print("removing.label REMOVING")
            print("exited.label EXITED")
            print("dead.label DEAD")
        else:
            print_containers_status(client)
    elif wildcard == "containers":
        if mode == "config":
            print("graph_title Docker containers")
            print("graph_vlabel containers")
            print("graph_category virtualization")
            print("containers_quantity.label Containers")
        else:
            print('containers_quantity.value', len(client.containers.list()))
    elif wildcard == "images":
        if mode == "config":
            print("graph_title Docker images")
            print("graph_vlabel images")
            print("graph_category virtualization")
            print("images_quantity.label Images")
        else:
            print('images_quantity.value', len(client.images.list()))
    elif wildcard == "volumes":
        if mode == "config":
            print("graph_title Docker volumes")
            print("graph_vlabel volumes")
            print("graph_category virtualization")
            print("volumes_quantity.label Volumes")
        else:
            print('volumes_quantity.value', len(client.volumes.list()))
    elif wildcard == "cpu":
        if mode == "config":
            graphlimit = str(os.cpu_count() * 100)
            print("graph_title Docker containers CPU usage")
            print("graph_args --base 1000 -r --lower-limit 0 --upper-limit " + graphlimit)
            print("graph_scale no")
            print("graph_period second")
            print("graph_vlabel CPU usage (%)")
            print("graph_category virtualization")
            print("graph_info This graph shows docker container CPU usage.")
            for container in client.containers.list():
                print("{}.label {}".format(container.name, container.name))
        else:
            print_containers_cpu(client)
    elif wildcard == "memory":
        if mode == "config":
            print("graph_title Docker containers memory usage")
            print("graph_args --base 1024 -l 0")
            print("graph_vlabel Bytes")
            print("graph_category virtualization")
            print("graph_info This graph shows docker container memory usage.")
            for container in client.containers.list():
                print("{}.label {}".format(container.name, container.name))
        else:
            print_containers_memory(client)


if __name__ == '__main__':
    main()