监控Docker Swarm集群的资源使用情况

监控Docker Swarm集群的资源使用情况,第1张

监控Docker Swarm集群的资源使用情况

1.在主节点得到各个节点的ip,并得到每个ip对应的容器ID

2.通过ssh访问每个ip

3.访问 /sys/fs/cgroup/<子系统>/docker/ 目录下的文件,利用容器ID拼出对应路径并读取资源统计数据

#!/bin/bash
# Collect the IP address of every node in the Docker Swarm into ip.txt,
# one address per line. Must be run on a manager node.

# Truncate instead of appending so reruns don't accumulate duplicates
# (the original used >> and grew the file on every run).
: > ip.txt

for node_id in $(docker node ls -q); do
	echo "$node_id"
	# Ask docker for the address directly via a Go template instead of
	# grepping the raw inspect JSON and hand-trimming quotes (the
	# original's ${com#*"} / ${com%%"*} dance is fragile).
	addr=$(docker node inspect --format '{{ .Status.Addr }}' "$node_id")
	echo "$addr" >> ip.txt
done
#!/usr/bin/env python3
import paramiko
import os

class Shell():
	"""SSH helper that lists a swarm node's containers into docker_ID.txt.

	For each container found via ``docker ps -a -q`` on the remote host,
	two lines are appended to docker_ID.txt: the node's IP, then the
	container ID — the pair format the downstream collector script reads.
	"""

	def __init__(self, host, port, user, pwd):
		# Hosts typically come from readlines(), so strip the trailing
		# newline; coerce the port to int so paramiko accepts it.
		self.host = host.strip()
		self.port = int(port)
		self.user = user
		self.pwd = pwd

	def connet(self):
		"""Connect to the host and append its container IDs to docker_ID.txt."""
		ssh = paramiko.SSHClient()
		ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
		try:
			ssh.connect(self.host, self.port, self.user, self.pwd)
			# exec_command alone is enough; the invoke_shell() channel in
			# the original was never used and leaked.
			stdin, stdout, stderr = ssh.exec_command('docker ps -a -q')
			container_ids = stdout.readlines()  # each keeps its trailing '\n'
			with open("docker_ID.txt", "a") as f:
				for cid in container_ids:
					print(cid)
					# Write the ip/id pair on two lines explicitly (the
					# original relied on a stray newline inside host).
					f.write(self.host + '\n' + cid)
		finally:
			ssh.close()  # the original never closed the connection

if __name__ == '__main__':

	# One node IP per line, written by the bash collection script.
	with open(r"./ip.txt") as iptxt:
		lines = iptxt.readlines()
	# Ensure the output file exists without shelling out to `touch`.
	open("docker_ID.txt", "a").close()
	for line in lines:
		if not line.strip():
			continue  # ignore blank lines
		print(line)
		# NOTE(review): credentials are hard-coded — move to config/env.
		test = Shell(line, '22', 'root', 'sdcloud@123')
		test.connet()
	print("Done")
#!/usr/bin/env python3
import paramiko
import os
from multiprocessing import Pool
import dill

class Conn():
	"""Plain holder for one node's SSH connection parameters."""

	def __init__(self, host, port, user, pwd):
		# Hosts are read from a file with readlines(), so strip the
		# trailing newline; coerce the port to int for paramiko.
		self.host = host.strip()
		self.port = int(port)
		self.user = user
		self.pwd = pwd
		
def collect_Mem(id, ssh):
	"""Read one container's memory usage/limit over ssh and log the ratio.

	Writes ``usage / limit`` as a single line to logs/<id>/mem_usage.txt;
	the logs/<id> directory must already exist. Closes ``ssh`` when done.
	"""
	# Container IDs come from readlines(); drop the trailing newline.
	# (The original's replace('n','') was a blog-mangled replace('\n','')
	# and left the newline inside the cgroup path.)
	cid = id.strip()
	try:
		base = '/sys/fs/cgroup/memory/docker/' + cid + '*/'
		stdin, stdout, stderr = ssh.exec_command('cat ' + base + 'memory.usage_in_bytes')
		usage = int(stdout.readlines()[-1])
		stdin, stdout, stderr = ssh.exec_command('cat ' + base + 'memory.limit_in_bytes')
		limit = int(stdout.readlines()[-1])
		ratio = usage / limit
		with open('./logs/' + cid + '/mem_usage.txt', "w") as f:
			# '\n', not the mangled 'n' literal from the original.
			f.write(str(ratio) + '\n')
	finally:
		ssh.close()

def collect_CPU(id, ssh):
	"""Read one container's cumulative CPU usage over ssh and log it.

	Writes the raw cpuacct.usage output to logs/<id>/cpu_usage.txt; the
	logs/<id> directory must already exist. Leaves ``ssh`` open —
	collect_Mem is the one that closes the connection.
	"""
	# Drop the trailing newline from the container ID (the original's
	# replace('n','') was a blog-mangled replace('\n','')).
	cid = id.strip()
	comm = 'cat /sys/fs/cgroup/cpu/docker/' + cid + '*/cpuacct.usage'
	stdin, stdout, stderr = ssh.exec_command(comm)
	with open('logs/' + cid + '/cpu_usage.txt', "w") as f:
		f.writelines(stdout.readlines())

def con(test, id):
	"""Pool worker: open an SSH session to one node (collectors disabled).

	``test`` is a Conn with the node's credentials; ``id`` is the container
	ID the (commented-out) collectors would use.
	"""
	ssh = paramiko.SSHClient()
	ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
	ssh.connect(test.host, test.port, test.user, test.pwd)
	# Re-enable as needed:
	# collect_CPU(id, ssh)
	# collect_Mem(id, ssh)  # NOTE: collect_Mem closes ssh itself
	ssh.close()  # fix: the original leaked one connection per pool task

if __name__ == '__main__':

	# docker_ID.txt holds alternating lines: node IP, then container ID.
	with open(r"./docker_ID.txt") as iptxt:
		lines = iptxt.readlines()
	p = Pool(8)
	# First run only: create logs/ and one logs/<id>/ dir per container
	# before enabling the collectors in con().
	# Step over (ip, id) pairs; len(lines) - 1 guards against an odd
	# trailing line (the original could IndexError on lines[i+1]).
	for i in range(0, len(lines) - 1, 2):
		# NOTE(review): credentials are hard-coded — move to config/env.
		test = Conn(lines[i], '22', 'root', 'sdcloud@123')
		id = lines[i + 1]
		p.apply_async(con, [test, id])
	p.close()
	p.join()
	print("Done")

按顺序依次执行三个文件,注意首次执行时取消建立文件夹的注释。

欢迎分享,转载请注明来源:内存溢出

原文地址: http://outofmemory.cn/zaji/4830669.html

(0)
打赏 微信扫一扫 微信扫一扫 支付宝扫一扫 支付宝扫一扫
上一篇 2022-11-10
下一篇 2022-11-10

发表评论

登录后才能评论

评论列表(0条)

保存