Hadoop HA 配置文件以及自动化Shell脚本开关HA集群

Hadoop HA 配置文件以及自动化Shell脚本开关HA集群,第1张

目录

配置文件

workers

core-site.xml

hdfs-site.xml

mapred-site.xml

yarn-site.xml 

自动化Shell脚本

format-ha

hadoop-ha

jpsall

xcall

xsync

zk

测试自动化脚本

HA集群初始化 

 启动HA集群

 关闭HA集群


配置文件 workers
hadoop102
hadoop103
hadoop104
core-site.xml


   
  
<configuration>
  <!-- Logical nameservice URI of the HA NameNode cluster -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>

  <!-- Base directory for Hadoop runtime data -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/module/hadoop-3.2.3/data</value>
  </property>

  <!-- ZooKeeper quorum used by ZKFC for automatic failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop102:2181,hadoop103:2181,hadoop104:2181</value>
  </property>

  <!-- Static user for the HDFS web UI -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>hadoop</value>
  </property>

  <!-- Proxy-user settings for the hadoop user -->
  <property>
    <name>hadoop.proxyuser.hadoop.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hadoop.groups</name>
    <value>*</value>
  </property>
</configuration>

hdfs-site.xml


  
  
<configuration>
  <!-- NameNode metadata directory -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file://${hadoop.tmp.dir}/name</value>
  </property>

  <!-- DataNode block storage directory -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file://${hadoop.tmp.dir}/data</value>
  </property>

  <!-- JournalNode edits storage directory -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>${hadoop.tmp.dir}/jn</value>
  </property>

  <!-- Logical name of the HA nameservice -->
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>

  <!-- NameNode IDs within the nameservice -->
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2,nn3</value>
  </property>

  <!-- RPC addresses of the three NameNodes -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>hadoop102:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>hadoop103:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn3</name>
    <value>hadoop104:8020</value>
  </property>

  <!-- HTTP (web UI) addresses of the three NameNodes -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>hadoop102:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>hadoop103:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn3</name>
    <value>hadoop104:9870</value>
  </property>

  <!-- Shared edits directory on the JournalNode quorum -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop102:8485;hadoop103:8485;hadoop104:8485/mycluster</value>
  </property>

  <!-- Client-side proxy provider that locates the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

  <!-- Fence a failed NameNode over ssh during failover -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>

  <!-- Private key used by the sshfence mechanism -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
  </property>

  <!-- Enable ZKFC-driven automatic failover -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
</configuration>


mapred-site.xml


    
    
<configuration>
  <!-- Run MapReduce jobs on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

  <!-- JobHistory server RPC address -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop102:10020</value>
  </property>

  <!-- JobHistory server web UI address -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop102:19888</value>
  </property>
</configuration>

yarn-site.xml 


      
<configuration>
  <!-- Shuffle service required by MapReduce -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

  <!-- Enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>

  <!-- Logical cluster id for the RM ensemble -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>cluster-yarn1</value>
  </property>

  <!-- ResourceManager IDs -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2,rm3</value>
  </property>

  <!-- rm1 (hadoop102) addresses -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>hadoop102</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>hadoop102:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>hadoop102:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>hadoop102:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>hadoop102:8031</value>
  </property>

  <!-- rm2 (hadoop103) addresses -->
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>hadoop103</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>hadoop103:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>hadoop103:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>hadoop103:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>hadoop103:8031</value>
  </property>

  <!-- rm3 (hadoop104) addresses -->
  <property>
    <name>yarn.resourcemanager.hostname.rm3</name>
    <value>hadoop104</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm3</name>
    <value>hadoop104:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm3</name>
    <value>hadoop104:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm3</name>
    <value>hadoop104:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm3</name>
    <value>hadoop104:8031</value>
  </property>

  <!-- ZooKeeper quorum used for RM leader election and state -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>hadoop102:2181,hadoop103:2181,hadoop104:2181</value>
  </property>

  <!-- Recover RM state after restart -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>

  <!-- Persist RM state in ZooKeeper -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>

  <!-- Environment variables containers inherit from NodeManagers -->
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>

  <!-- Aggregate container logs to HDFS -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>

  <!-- Link aggregated logs to the JobHistory server UI -->
  <property>
    <name>yarn.log.server.url</name>
    <value>http://hadoop102:19888/jobhistory/logs</value>
  </property>

  <!-- Keep aggregated logs for 7 days -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
</configuration>



自动化Shell脚本

以下是初始化、开关HA集群所用到的Shell脚本

[hadoop@hadoop102 bin]$ pwd
/home/hadoop/bin
[hadoop@hadoop102 bin]$ ls
format-ha  hadoop-ha  jpsall  xcall  xsync  zk
[hadoop@hadoop102 bin]$
format-ha

初始化HA集群

#!/bin/bash
# format-ha — wipe all HDFS state on every node and re-initialize the HA cluster.
# Run once, with the whole cluster stopped. Relies on the sibling xcall and zk scripts.

nodes=(hadoop102 hadoop103 hadoop104)

# Remove old data and log directories so the format starts from a clean slate.
for node in "${nodes[@]}"
do
        echo "========= delete data and logs in $node =========="
        ssh "$node" "rm -rf /opt/module/hadoop-3.2.3/data /opt/module/hadoop-3.2.3/logs"
done

echo "------- create journalnode --------"
xcall hdfs --daemon start journalnode

echo "------- format namenode -------"
# Format nn1, start it, then bootstrap nn2/nn3 from it; stop HDFS afterwards.
ssh hadoop102 "hdfs namenode -format"
ssh hadoop102 "hdfs --daemon start namenode"
ssh hadoop103 "hdfs namenode -bootstrapStandby"
ssh hadoop104 "hdfs namenode -bootstrapStandby"
ssh hadoop102 "/opt/module/hadoop-3.2.3/sbin/stop-dfs.sh"

# ZKFC needs ZooKeeper up to create its znode; stop it again when done.
zk start
hdfs zkfc -formatZK
zk stop
hadoop-ha

开关HA集群

#!/bin/bash
# hadoop-ha — start or stop the whole Hadoop HA cluster.
# Usage: hadoop-ha start|stop
# Relies on the sibling zk and xcall scripts in /home/hadoop/bin.

if [ $# -lt 1 ]
then
        echo "No Args Input ..."
        exit 1    # error path must not exit 0
fi

if [ $# -gt 1 ]
then
        echo "Args Exceeded limit"
        exit 1
fi

# BUG FIX: original read "case  in" — the "$1" selector was missing,
# which is a syntax error; the script could never run.
case "$1" in
"start")
        echo "============= 启动 hadoop 集群 =============="

        echo "------------- start ZooKeeper -----------"
        ssh hadoop102 "/home/hadoop/bin/zk start"

        echo "------------- start Journalnode ------------"
        ssh hadoop102 "/home/hadoop/bin/xcall hdfs --daemon start journalnode"

        echo "------------- 启动 HDFS --------------"
        ssh hadoop102 "/opt/module/hadoop-3.2.3/sbin/start-dfs.sh"

        echo "------------- 启动 yarn --------------"
        ssh hadoop103 "/opt/module/hadoop-3.2.3/sbin/start-yarn.sh"

        echo "------------- 启动 historyserver -------------"
        ssh hadoop102 "/opt/module/hadoop-3.2.3/bin/mapred --daemon start historyserver"
;;
"stop")
        echo "============= 关闭 hadoop 集群 =============="

        # Shut down in reverse order of startup.
        echo "------------- 关闭 historyserver -------------"
        ssh hadoop102 "/opt/module/hadoop-3.2.3/bin/mapred --daemon stop historyserver"

        echo "------------- 关闭 yarn --------------"
        ssh hadoop103 "/opt/module/hadoop-3.2.3/sbin/stop-yarn.sh"

        echo "------------- 关闭 HDFS --------------"
        ssh hadoop102 "/opt/module/hadoop-3.2.3/sbin/stop-dfs.sh"

        echo "------------- stop ZooKeeper -------------"
        ssh hadoop102 "/home/hadoop/bin/zk stop"
;;
*)
        echo "Input Args Error..."
        exit 1
;;
esac
jpsall

查看所有节点服务器所有正在运行的Java进程

#!/bin/bash
# jpsall — list the running Java processes on every cluster node.

nodes="hadoop102 hadoop103 hadoop104"

for node in $nodes
do
        echo =========== $node ==========
        ssh "$node" jps
done
xcall

远程执行bash指令

#!/bin/bash
# xcall — run the same command on every cluster node over ssh.
# Usage: xcall <command> [args...]

if [ $# -lt 1 ]
then
        echo not enough arguments
        exit 1    # BUG FIX: original fell through and ran ssh with an empty command
fi

for host in hadoop102 hadoop103 hadoop104
do
        echo ========= $host ========
        # "$*" joins all arguments into one command string,
        # replacing the original O(n) manual concatenation loop.
        echo "$*"
        ssh "$host" "$*"
done
xsync

集群同步文件

#!/bin/bash
# xsync — sync the given files/directories to every cluster node with rsync,
# preserving the same absolute path on each remote host.
# Usage: xsync <file-or-dir>...

if [ $# -lt 1 ]
then
        echo Not Enough Argument!
        exit 1    # error path must not exit 0
fi

for host in hadoop102 hadoop103 hadoop104
do
        echo =============== $host ===============

        for file in "$@"
        do
                if [ -e "$file" ]
                then
                        # -P resolves symlinks so the remote path is the physical path
                        pdir=$(cd -P "$(dirname "$file")"; pwd)
                        fname=$(basename "$file")
                        # destination dir must exist as the same absolute path remotely
                        ssh "$host" "mkdir -p $pdir"
                        rsync -av "$pdir/$fname" "$host:$pdir"
                else
                        # BUG FIX: message typo "dose not exists"
                        echo "$file does not exist!"
                fi
        done
done
zk

开关ZooKeeper集群

#!/bin/bash
# zk — start/stop/query the ZooKeeper ensemble on all cluster nodes.
# Usage: zk start|stop|status

# BUG FIX: original read "case  in" — the "$1" selector was missing,
# which is a syntax error; the script could never run.
case "$1" in
"start")
        for host in hadoop102 hadoop103 hadoop104
        do
                echo ------------ zookeeper $host 启动 ---------------
                ssh "$host" "/opt/module/zookeeper-3.5.9/bin/zkServer.sh start"
        done
;;
"stop")
        for host in hadoop102 hadoop103 hadoop104
        do
                echo ------------ zookeeper $host 停止 ---------------
                ssh "$host" "/opt/module/zookeeper-3.5.9/bin/zkServer.sh stop"
        done
;;
"status")
        for host in hadoop102 hadoop103 hadoop104
        do
                echo ------------ zookeeper $host 状态 ---------------
                ssh "$host" "/opt/module/zookeeper-3.5.9/bin/zkServer.sh status"
        done
;;
*)
        # BUG FIX: garbled message and success exit status on bad input
        echo "Usage: zk start|stop|status"
        exit 1
;;
esac

测试自动化脚本

HA集群初始化 

 启动HA集群

 

 

 

 

 

 关闭HA集群

欢迎分享,转载请注明来源:内存溢出

原文地址: http://outofmemory.cn/langs/759351.html

(0)
打赏 微信扫一扫 微信扫一扫 支付宝扫一扫 支付宝扫一扫
上一篇 2022-05-01
下一篇 2022-05-01

发表评论

登录后才能评论

评论列表(0条)

保存