Common Scripts for Hadoop, ZooKeeper, and Hive

1. Cluster file distribution script
#!/bin/bash
# Distribute the given files to every node in the cluster

if [[ $# -lt 1 ]]; then
	echo "Argument Error!"
	exit
fi

for host in hadoop102 hadoop103 hadoop104
do
	for file in "$@"; do
		if [[ -e $file ]]; then
			# Resolve the absolute parent directory and base name of the file
			pdir=$(cd -P "$(dirname "$file")"; pwd)
			fname=$(basename "$file")
			# Make sure the directory exists on the remote host, then sync the file
			ssh "$host" "mkdir -p $pdir"
			rsync -av "$pdir/$fname" "$host:$pdir"
		else
			echo "$file does not exist!"
		fi
	done
done
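A usage sketch, assuming the script above is saved under a hypothetical name such as xsync, made executable, and placed on the PATH (the path and file names below are examples only):
chmod +x ~/bin/xsync
# Distribute a config directory and a single file to hadoop102/103/104
xsync /opt/module/hadoop/etc/hadoop /opt/module/app.jar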
2. Hadoop start/stop script
#!/bin/bash
# Start/stop the Hadoop cluster: HDFS, YARN, and the JobHistory server

if [[ $# -lt 1 ]]; then
	echo "Argument Error"
	exit
fi

case $1 in
	"start" )
				echo "------start hadoop cluster------"
				echo "------start hdfs------"
				ssh hadoop102 "start-dfs.sh"
				echo "start yarn"
				ssh hadoop103 "start-yarn.sh"
				echo "------start historyserver------"
				ssh hadoop103 "mr-jobhistory-daemon.sh start historyserver"
	;;
	"stop" )
				echo "------stop hadoop cluster------"
				echo "------stop historyserver------"
				ssh hadoop103 "mr-jobhistory-daemon.sh stop historyserver"
				echo "------stop yarn------"
				ssh hadoop103 "stop-yarn.sh"
				echo "------stop hdfs------"
				ssh hadoop102 "stop-dfs.sh"
	;;
	* )
				echo "Argument Error"
	;;
esac
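Invocation sketch, assuming a hypothetical file name hdp.sh on the PATH:
hdp.sh start   # HDFS on hadoop102, then YARN and the JobHistory server on hadoop103
hdp.sh stop    # stops the same components in reverse order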
3. ZooKeeper start/stop script
#!/bin/bash

case $1 in
	"start" )
		for i in hadoop102 hadoop103 hadoop104; do
			echo "----------- $i start -------"
			ssh $i "/opt/module/zookeeper-3.5.7/bin/zkServer.sh start"
		done
		;;
	"stop" )
		for i in hadoop102 hadoop103 hadoop104; do
			echo "----------- $i stop -------"
			ssh $i "/opt/module/zookeeper-3.5.7/bin/zkServer.sh stop"
		done
		;;
	"status" )
		for i in hadoop102 hadoop103 hadoop104; do
			echo "----------- $i status -------"
			ssh $i "/opt/module/zookeeper-3.5.7/bin/zkServer.sh status"
		done
		;;
	* )
		echo "Args Error"
		;;
esac
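Usage sketch, assuming a hypothetical file name zk.sh; each action loops over the three nodes:
zk.sh start
zk.sh status   # each node reports its mode (leader or follower)
zk.sh stop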
4. JPS process check script
#!/bin/bash
# Show the Java processes (jps) running on every node of the Hadoop cluster
for host in hadoop102 hadoop103 hadoop104; do
	echo "------$host-------"
	ssh $host jps
done
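Usage sketch, assuming a hypothetical file name jpsall; no arguments are needed:
jpsall   # prints the jps output of hadoop102, hadoop103 and hadoop104 in turn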
5. Hive Metastore and HiveServer2 start/stop script
#!/bin/bash
HIVE_LOG_DIR=$HIVE_HOME/logs
if [[ ! -d $HIVE_LOG_DIR ]]; then
	mkdir -p $HIVE_LOG_DIR
fi

# check_process <keyword> <port>: print the PID of any process whose command line
# matches <keyword> and return 0 if one is found; the port argument is kept for
# readability but is not used by this check.
function check_process(){
	pid=$(ps -ef | grep -v grep | grep -i "$1" | awk '{print $2}')
	echo $pid
	[ "$pid" ] && return 0 || return 1
}

function hive_start(){
	metapid=$(check_process Hivemetastore 9083)
	cmd="nohup hive --service metastore > $HIVE_LOG_DIR/metastore.log 2>&1 &"
	[ -z "$metapid" ] && eval $cmd && echo "Hivemetastore Is Starting!" || echo "Hivemetastore Is Running!"
	server2pid=$(check_process HiveServer2 10000)
	cmd="nohup hive --service hiveserver2  >$HIVE_LOG_DIR/HiveServer2.log 2>&1  &"
	[ -z "$server2pid" ] && eval $cmd && echo "Hiveserver2 Is Starting!" || echo "HiveServer2 IS Running!"
}

function hive_stop(){
	metapid=$(check_process Hivemetastore 9083)
	[ "$metapid" ] && kill $metapid && echo "Hivemetastore Is Killing!" || echo "Hivemetastore Not Running!"
	server2pid=$(check_process HiveServer2 10000)
	[ "$server2pid" ] && kill $server2pid && echo "HiveServer2 Is Killing!" || echo "HiveServer2 Not Running!"
}


case $1 in
	"start" )
		hive_start
		;;
	"stop" )
		hive_stop
		;;
	"restart" )
		hive_stop
		sleep 5
		hive_start
		;;
	"status" )
		check_process Hivemetastore 9083 && echo "Hivemetastore Is Running!" || echo "Hivemetastore Not Running!"
		check_process HiveServer2 10000 && echo "HiveServer2 Is Running!" || echo "HiveServer2 Not Running!"
		;;
	* )
		echo "Args Error!"
		;;
esac
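Usage sketch, assuming a hypothetical file name hiveservices.sh, that HIVE_HOME is already set, and that the Hadoop cluster is up (Hive relies on HDFS and YARN):
hiveservices.sh start     # starts the metastore (9083) and HiveServer2 (10000) in the background
hiveservices.sh status    # reports whether each service is running
hiveservices.sh restart   # stop, wait 5 seconds, then start again
hiveservices.sh stop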
