File configuration
Set JAVA_HOME in the three env scripts (the leading number is the line to edit inside each file):

[root@linux02 hadoop]# vi ./hadoop-env.sh
25 export JAVA_HOME=/opt/soft/jdk180
[root@linux02 hadoop]# vi ./yarn-env.sh
23 export JAVA_HOME=/opt/soft/jdk180
[root@linux02 hadoop]# vi ./mapred-env.sh
16 export JAVA_HOME=/opt/soft/jdk180

--------------------------------------------------
[root@linux02 hadoop]# vi ./core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster/</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/soft/hadoop260/hadooptmp/</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>linux03:2181,linux04:2181,linux05:2181</value>
  </property>
  <property>
    <name>hadoop.proxyuser.bigdata.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.bigdata.groups</name>
    <value>*</value>
  </property>
</configuration>

--------------------------------------------------
[root@linux02 hadoop]# vi ./hdfs-site.xml

<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>linux02:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>linux02:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>linux03:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>linux03:50070</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/soft/hadoop260/journaldata</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://linux02:8485;linux03:8485;linux04:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>

-----------------------------------------
[root@linux02 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@linux02 hadoop]# vi ./mapred-site.xml

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>linux05:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>linux05:19888</value>
  </property>
</configuration>

----------------------------------------------
[root@linux02 hadoop]# vi ./yarn-site.xml

<configuration>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yrc</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>linux02</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>linux03</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>linux03:2181,linux04:2181,linux05:2181</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>86400</value>
  </property>
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
</configuration>

-----------------------------------------
[root@linux02 hadoop]# vi ./slaves
linux02
linux03
linux04
linux05

----------------------------------
Distribute the configured Hadoop directory to the other nodes:

[root@linux02 hadoop]# xsync ./hadoop260/

-----------------
Append the Hadoop environment variables to /etc/profile on every node, then distribute the file:

[root@linux02 hadoop]# vi /etc/profile
[root@linux03 hadoop]# vi /etc/profile
[root@linux04 hadoop]# vi /etc/profile
[root@linux05 hadoop]# vi /etc/profile

export HADOOP_HOME=/opt/soft/hadoop260
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

[root@linux02 hadoop]# xsync /etc/profile
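xsync (like the zkop and jqop commands used in the next section) is not a Hadoop tool; it is the author's own cluster helper script, linked at the end of this post. Purely as an illustration of what such a script does, here is a minimal rsync-over-SSH sketch, assuming passwordless SSH from linux02 to linux03/linux04/linux05 and rsync installed on every node; it is not the author's actual script.

#!/bin/bash
# Hypothetical xsync-style script: copy each argument to every other node,
# recreating the same absolute path there. Assumes passwordless SSH and rsync.
if [ $# -lt 1 ]; then
  echo "Usage: xsync <file-or-dir> ..."
  exit 1
fi
for host in linux03 linux04 linux05; do
  echo "==== syncing to $host ===="
  for path in "$@"; do
    dir=$(cd -P "$(dirname "$path")" && pwd)   # absolute parent directory
    name=$(basename "$path")
    ssh "$host" "mkdir -p $dir"                # make sure the target directory exists
    rsync -av "$dir/$name" "$host:$dir"        # copy the file or directory recursively
  done
done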
Starting the cluster services
Starting the cluster for the first time

1. Start the ZooKeeper cluster first

[root@linux02 hadoop]# zkop status
--------linux03 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower
--------linux04 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: leader
--------linux05 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower
[root@linux02 hadoop]#

2. Start the JournalNodes

[root@linux02 hadoop]# hadoop-daemon.sh start journalnode
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux02.out
[root@linux02 hadoop]# ssh linux03 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux03.out
[root@linux02 hadoop]# ssh linux04 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux04.out
[root@linux02 hadoop]# jqop jps
-------------linux02 command output-----------
jps
9489 Jps
9048 JournalNode
-------------linux03 command output-----------
jps
12081 Jps
1764 QuorumPeerMain
12006 JournalNode
-------------linux04 command output-----------
jps
2595 JournalNode
1752 QuorumPeerMain
2654 Jps
-------------linux05 command output-----------
jps
2440 Jps
1758 QuorumPeerMain

3. Format the NameNode

[root@linux02 soft]# hadoop namenode -format

Copy the freshly formatted hadooptmp directory from linux02 to linux03:

[root@linux02 hadoop260]# scp -r ./hadooptmp/ root@linux03:/opt/soft/hadoop260/
VERSION                              100%  207    43.3KB/s   00:00
seen_txid                            100%    2     0.4KB/s   00:00
fsimage_0000000000000000000.md5      100%   62    23.9KB/s   00:00
fsimage_0000000000000000000          100%  321   120.3KB/s   00:00

4. Initialize the HA state in ZooKeeper

[root@linux02 hadoop260]# hdfs zkfc -formatZK

5. Start HDFS

[root@linux02 hadoop260]# start-dfs.sh
21/11/04 23:24:40 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [linux02 linux03]
linux03: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-linux03.out
linux02: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-linux02.out
linux05: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux05.out
linux04: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux04.out
linux02: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux02.out
linux03: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux03.out
21/11/04 23:25:02 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting ZK Failover Controllers on NN hosts [linux02 linux03]
linux02: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-linux02.out
linux03: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-linux03.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
9048 JournalNode
10970 Jps
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
1764 QuorumPeerMain
12006 JournalNode
13319 Jps
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
1752 QuorumPeerMain
2776 Jps
-------------linux05 command output-----------
jps
2562 Jps
2473 DataNode
1758 QuorumPeerMain

6. Start YARN

[root@linux02 hadoop260]# start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /opt/soft/hadoop260/logs/yarn-root-resourcemanager-linux02.out
linux05: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux05.out
linux04: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux04.out
linux03: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux03.out
linux02: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux02.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
11321 Jps
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
13634 Jps
1764 QuorumPeerMain
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
2933 Jps
1752 QuorumPeerMain
2809 NodeManager
-------------linux05 command output-----------
jps
2720 Jps
2596 NodeManager
2473 DataNode
1758 QuorumPeerMain

Note: start-yarn.sh only launches the ResourceManager on the local node, so the standby ResourceManager on linux03 has to be started separately (e.g. ssh linux03 "source /etc/profile; yarn-daemon.sh start resourcemanager"); that is why it only appears in the next listing.

[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
10507 NameNode
10603 DataNode
11837 Jps
-------------linux03 command output-----------
jps
13953 Jps
1764 QuorumPeerMain
13861 ResourceManager
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
-------------linux04 command output-----------
jps
2688 DataNode
2960 Jps
2595 JournalNode
1752 QuorumPeerMain
2809 NodeManager
-------------linux05 command output-----------
jps
2596 NodeManager
2473 DataNode
2747 Jps
1758 QuorumPeerMain

7. Start the JobHistoryServer on linux05

[root@linux02 hadoop260]# ssh linux05 "source /etc/profile; mr-jobhistory-daemon.sh start historyserver"
starting historyserver, logging to /opt/soft/hadoop260/logs/mapred-root-historyserver-linux05.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
12177 Jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
1764 QuorumPeerMain
13861 ResourceManager
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
14287 Jps
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
1752 QuorumPeerMain
2809 NodeManager
2988 Jps
-------------linux05 command output-----------
jps
2596 NodeManager
2871 Jps
2473 DataNode
2794 JobHistoryServer
1758 QuorumPeerMain
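With everything running, it is worth confirming which NameNode and which ResourceManager are currently active. The commands below are standard Hadoop 2.x HA admin calls using the nn1/nn2 and rm1/rm2 IDs defined in hdfs-site.xml and yarn-site.xml above; they are an optional sanity check rather than part of the original transcript.

hdfs haadmin -getServiceState nn1    # NameNode on linux02: prints "active" or "standby"
hdfs haadmin -getServiceState nn2    # NameNode on linux03
yarn rmadmin -getServiceState rm1    # ResourceManager on linux02
yarn rmadmin -getServiceState rm2    # ResourceManager on linux03

To exercise automatic failover, kill the active NameNode process (kill -9 <pid>) and re-run the haadmin commands; the ZKFC should promote the other NameNode to active within a few seconds.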
Viewing the logs
[root@linux05 ~]# cd /opt/soft/hadoop260/logs
[root@linux05 logs]# vi ./mapred-root-historyserver-linux05.log
[root@linux05 logs]# cat mapred-root-historyserver-linux05.log
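Besides reading the log file directly, a quick health check is to query the JobHistoryServer web UI, whose address (linux05:19888) comes from mapreduce.jobhistory.webapp.address in mapred-site.xml; this is an optional extra rather than part of the original steps.

tail -n 50 /opt/soft/hadoop260/logs/mapred-root-historyserver-linux05.log   # last lines of the history server log
curl -s -o /dev/null -w "%{http_code}\n" http://linux05:19888/jobhistory    # 200 means the web UI is answering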
Link to the helper scripts used above: https://blog.csdn.net/liuyongsheng666/article/details/121122238?spm=1001.2014.3001.5501