Hadoop Standalone Installation and Configuration Steps

#################################
# HADOOP 3.1.3 single1 install  #
#################################

 
 cd /opt/download
 ls     =>hadoop-3.1.3.tar.gz
#extract into the software directory
 tar -zxvf /opt/download/hadoop-3.1.3.tar.gz -C /opt/software/
 cd /opt/software
 ls     =>hadoop-3.1.3
#rename, then give ownership to root
 mv hadoop-3.1.3/ hadoop313
 chown -R root:root /opt/software/hadoop313
 ls     =>hadoop313
 cd hadoop313/
 pwd   =>/opt/software/hadoop313

#set environment variables and activate them
 vim /etc/profile.d/my.sh
#---------------------------------------------------------------------
#hadoop 3.1.3
export HADOOP_HOME=/opt/software/hadoop313
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export HDFS_JOURNALNODE_USER=root
export HDFS_ZKFC_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
#---------------------------------------------------------------------
#activate the profile
 source /etc/profile
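
#(optional) a quick sanity check that the new variables took effect — expected output shown after =>
 echo $HADOOP_HOME   =>/opt/software/hadoop313
 hadoop version      =>Hadoop 3.1.3 (first line of the version banner)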

#create the data directory
 cd /opt/software/hadoop313
 mkdir data

#configure Hadoop's internal environment variables
   #cd to the Hadoop config directory and edit the following files
 cd /opt/software/hadoop313/etc/hadoop
 vim hadoop-env.sh
 #-----------------------------------------------------------------
 export JAVA_HOME=/opt/software/jdk8   (uncomment this line; jdk8 is the name of the JDK directory installed under your software folder)
 #-----------------------------------------------------------------
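
#(optional) confirm the JDK directory name before saving — jdk8 is this guide's path, yours may differ
 ls -d /opt/software/jdk*   =>/opt/software/jdk8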

#update the local hosts mapping on Windows
 C:\Windows\System32\drivers\etc\hosts
 Open this hosts file and append a line at the end:
 192.168.245.168 single1          ----->your VM's IP address and your VM's hostname
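
#(optional) from a Windows command prompt, verify the mapping resolves to your VM
 ping single1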

#configure the core configuration file
 vim core-site.xml
#-----------------------------------------------------------------------------------

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://single1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop/kb16</value>
        <description>Local Hadoop temp directory on the namenode</description>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
        <description>Size of read/write SequenceFiles buffer: 128K</description>
    </property>
</configuration>

#-----------------------------------------------------------------------------------
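
#(optional) hdfs getconf reads the XML files directly, no daemon needed — the same check works for any key configured below
 hdfs getconf -confKey fs.defaultFS   =>hdfs://single1:9000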

#configure the file system
 vim hdfs-site.xml
#--------------------------------------------------

<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
        <description>Number of replicas kept for each block</description>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/opt/software/hadoop313/data/dfs/name</value>
        <description>Where the namenode stores the HDFS namespace metadata</description>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/opt/software/hadoop313/data/dfs/data</value>
        <description>Physical storage location of blocks on the datanode</description>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>single1:9869</value>
    </property>
    <!-- disable permission checking -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
        <description>Disable permission checking</description>
    </property>
</configuration>

#--------------------------------------------------

#MapReduce framework configuration
vim mapred-site.xml
#----------------------------------------------------------------------------------

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
        <description>Execution framework: local, classic or yarn.</description>
        <final>true</final>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
    </property>
    <!-- job history server -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>single1:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>single1:19888</value>
    </property>
    <!-- per-task memory -->
    <property>
        <name>mapreduce.map.memory.mb</name>
        <value>1024</value>
    </property>
    <property>
        <name>mapreduce.reduce.memory.mb</name>
        <value>2048</value>
    </property>
</configuration>

#----------------------------------------------------------------------------------

#YARN scheduler configuration
vim yarn-site.xml
#-------------------------------


<configuration>
    <!-- resourcemanager -->
    <property>
        <name>yarn.resourcemanager.connect.retry-interval.ms</name>
        <value>10000</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
    </property>
    <!-- nodemanager -->
    <property>
        <description>Address where the localizer IPC is.</description>
        <name>yarn.nodemanager.localizer.address</name>
        <value>single1:8040</value>
    </property>
    <property>
        <description>Address of the container manager IPC in the NM.</description>
        <name>yarn.nodemanager.address</name>
        <value>single1:8050</value>
    </property>
    <property>
        <description>NM Webapp address.</description>
        <name>yarn.nodemanager.webapp.address</name>
        <value>single1:8042</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>/tmp/hadoop/yarn/local</value>
    </property>
    <property>
        <name>yarn.nodemanager.log-dirs</name>
        <value>/tmp/hadoop/yarn/log</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.application.classpath</name>
        <value>$HADOOP_HOME/etc/hadoop:$HADOOP_HOME/share/hadoop/common/lib/*:$HADOOP_HOME/share/hadoop/common/*:$HADOOP_HOME/share/hadoop/hdfs:$HADOOP_HOME/share/hadoop/hdfs/lib/*:$HADOOP_HOME/share/hadoop/hdfs/*:$HADOOP_HOME/share/hadoop/mapreduce/*:$HADOOP_HOME/share/hadoop/yarn:$HADOOP_HOME/share/hadoop/yarn/lib/*:$HADOOP_HOME/share/hadoop/yarn/*</value>
    </property>
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
</configuration>

#-------------------------------

#clear anything left under data
   cd ../../       #back to the hadoop313 directory
   rm -rf data/*
#format: first time only
   hdfs namenode -format
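
#(optional) a successful format writes metadata into the name dir configured in hdfs-site.xml
   ls data/dfs/name/current/    =>VERSION, seen_txid and fsimage_* files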

#start up
   #start HDFS
    start-dfs.sh

    jps 
    #----------------------
    2002 DataNode
    2377 Jps
    1866 NameNode
    2236 SecondaryNameNode
    #---------------------- 
   #start YARN
    start-yarn.sh

    jps
    #-----------------------------
    2002 DataNode
    2838 Jps
    2648 NodeManager
    1866 NameNode
    2236 SecondaryNameNode
    2524 ResourceManager
    #-----------------------------
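
    #(optional) ask the ResourceManager whether it registered the NodeManager
    yarn node -list    #expect: Total Nodes:1, node single1:8050 in RUNNING state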
   #start the history server
    mapred --daemon start historyserver
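
   #with everything up, jps additionally shows a JobHistoryServer process, and the
   #web UIs are reachable from the Windows browser via the hosts mapping
   #(9870 and 8088 are Hadoop 3.x defaults; 19888 was set in mapred-site.xml):
    # http://single1:9870    NameNode
    # http://single1:8088    ResourceManager
    # http://single1:19888   JobHistoryServer
   #smoke test: the examples jar shipped with Hadoop 3.1.3 runs a small pi-estimation job on YARN
    hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar pi 2 10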

#shut down
   #stop the history server
    mapred --daemon stop historyserver
   #stop YARN
    stop-yarn.sh
   #stop HDFS
    stop-dfs.sh

Note: in the XML config files above, single1 is the author's VM hostname. If your hostname is not single1, either replace every occurrence of single1 in those files with your own hostname, or change your hostname to single1 so you can paste the files in unchanged.
