1. Install NTP
rpm -qa | grep ntp
yum install ntp -y
2. Remove mariadb
rpm -qa | grep mariadb
rpm -e --nodeps mariadb-libs-<version>
3. Install MySQL (install the RPMs in this order)
rpm -ivh mysql-community-common ... libs ... libs-compat ... client ... server
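A sketch of the full commands, assuming the RPM bundle has been unpacked into a directory such as /usr/software/mysql; the 5.7.28 version string is only a placeholder for whatever release was actually downloaded:
cd /usr/software/mysql
rpm -ivh mysql-community-common-5.7.28-1.el7.x86_64.rpm
rpm -ivh mysql-community-libs-5.7.28-1.el7.x86_64.rpm
rpm -ivh mysql-community-libs-compat-5.7.28-1.el7.x86_64.rpm
rpm -ivh mysql-community-client-5.7.28-1.el7.x86_64.rpm
rpm -ivh mysql-community-server-5.7.28-1.el7.x86_64.rpm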
4. Change the hostname (set each node's own name, then open a new shell)
hostnamectl set-hostname xxx
bash
5. Disable the firewall
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
6. hosts mapping
vi /etc/hosts
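A sketch of the mapping, assuming the three nodes are named master, slave1 and slave2 as in the rest of these notes; the IP addresses are placeholders for your own:
192.168.1.101   master
192.168.1.102   slave1
192.168.1.103   slave2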
7. Change the time zone
tzselect
TZ="Asia/Shanghai"; export TZ
8. Comment out the default NTP servers, make the master its own local clock source, and set its stratum to 10
vi /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 10
Keep the hardware clock in sync with the system clock:
vi /etc/sysconfig/ntpd
OPTIONS="-g"
SYNC_HWCLOCK=yes
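A sketch of the relevant edits in /etc/ntp.conf on master; the pool host names are the CentOS defaults and may read differently in your file:
# comment out the default upstream servers
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
# use the local clock as the reference, advertised at stratum 10
server 127.127.1.0
fudge 127.127.1.0 stratum 10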
9. Start ntpd on the master node; the slave nodes sync from it
On master:
service ntpd start
On each slave, add a cron job (every 30 minutes between 10:00 and 17:00):
crontab -e
*/30 10-17 * * * /usr/sbin/ntpdate master
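Before relying on the cron job, a one-off sync on each slave confirms the master is reachable as a time source (a sketch):
/usr/sbin/ntpdate master     # run on slave1 and slave2; prints the offset and steps the clock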
10. Passwordless SSH
ssh-keygen -t rsa      (press Enter three times to accept the defaults)
ssh-copy-id master
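The public key needs to reach every node, not just master; a sketch using the same three hostnames, run on each node after ssh-keygen:
for host in master slave1 slave2; do
  ssh-copy-id "$host"      # enter that node's password once; later logins are key-based
done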
11. Environment variables
vi /etc/profile
export JAVA_HOME=/usr/software/java/jdk1.8.0_91
export PATH=$PATH:$JAVA_HOME/bin
export ZOOKEEPER_HOME=/usr/software/zookeeper/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_HOME/bin
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HIVE_HOME=/usr/software/hive/apache-hive-2.3.4-bin
export PATH=$PATH:$HIVE_HOME/bin
source /etc/profile
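A quick sanity check once the profile has been sourced (a sketch):
java -version      # should report 1.8.0_91
hadoop version     # should report Hadoop 2.7.7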
12. ZooKeeper
In conf/, rename the sample config and edit it:
mv zoo_sample.cfg zoo.cfg
dataDir=/usr/software/zookeeper/zookeeper-3.4.14/zkdata/
dataLogDir=/usr/software/zookeeper/zookeeper-3.4.14/zkdatalog/
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
Create the data directories and the myid file, then start the server:
mkdir zkdata
mkdir zkdatalog
cd zkdata
touch myid
bin/zkServer.sh start
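The myid file must contain the id from the matching server.N line, so each node writes a different number; a sketch, run inside zkdata on the respective node, plus a status check once all three servers are up:
echo 1 > myid             # on master  (server.1)
echo 2 > myid             # on slave1  (server.2)
echo 3 > myid             # on slave2  (server.3)
bin/zkServer.sh status    # one node should report leader, the others follower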
13. Hadoop
All of the following files live under $HADOOP_HOME/etc/hadoop.
hadoop-env.sh:
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
core-site.xml:
fs.default.name    hdfs://master:9000
hadoop.tmp.dir     /usr/software/hadoop/hadoop-2.7.7/hdfs/tmp
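Each of these name/value pairs (and those for the files below) is written into the file as a <property> entry; a sketch of core-site.xml in that format:
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://master:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/software/hadoop/hadoop-2.7.7/hdfs/tmp</value>
  </property>
</configuration>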
hdfs-site.xml:
dfs.namenode.name.dir    /usr/software/hadoop/hadoop-2.7.7/hdfs/name
dfs.datanode.data.dir    /usr/software/hadoop/hadoop-2.7.7/hdfs/data
dfs.replication          2
yarn-env.sh:
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
yarn-site.xml:
yarn.resourcemanager.admin.address    master:18141
yarn.nodemanager.aux-services         mapreduce_shuffle
mapred-site.xml (create it from the template first):
cp mapred-site.xml.template mapred-site.xml
mapreduce.framework.name    yarn
In the same directory, create the master and slaves files:
touch master      (content: master)
touch slaves      (content: slave1 and slave2, one per line)
Format HDFS (run on master):
hadoop namenode -format
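The notes stop at formatting; the daemons are normally started from master with the standard scripts (a sketch, assuming the sbin directory added to PATH in step 11):
start-dfs.sh      # NameNode plus the DataNodes listed in the slaves file
start-yarn.sh     # ResourceManager plus the NodeManagers
jps               # run on each node to confirm the expected daemons are up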
14. Data warehouse (Hive)
Check that MySQL is installed and the service is running:
rpm -qa | grep mysql-community-server
systemctl status mysqld
Get the generated temporary root password:
grep "temporary password" /var/log/mysqld.log
Log in, reset the password, and open remote access:
mysql -uroot -p
set global validate_password_policy=0;
set global validate_password_length=6;
alter user 'root'@'localhost' identified by '123456';
grant all privileges on *.* to 'root'@'%' identified by '123456' with grant option;
flush privileges;
hive-env.sh:
mv hive-env.sh.template hive-env.sh
export HADOOP_HOME=/usr/software/hadoop/hadoop-2.7.7
hive-site.xml on master (Hive client, pointing at the metastore on slave1):
hive.metastore.warehouse.dir    /user/hive_remote/warehouse
hive.metastore.local            false
hive.metastore.uris             thrift://slave1:9083
hive-site.xml on slave1 (metastore server, storing its schema in the MySQL instance on slave2):
hive.metastore.warehouse.dir             /user/hive_remote/warehouse
javax.jdo.option.ConnectionURL           jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true&characterEncoding=UTF-8&useSSL=false
javax.jdo.option.ConnectionDriverName    com.mysql.jdbc.Driver
javax.jdo.option.ConnectionUserName      root
javax.jdo.option.ConnectionPassword      123456
hive.metastore.schema.verification       false
datanucleus.schema.autoCreateAll         true
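When these pairs go into hive-site.xml as <property> entries, the & characters in the JDBC URL must be escaped as &amp; or the XML will not parse; a sketch of that single entry:
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
</property>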
Copy the MySQL JDBC driver into Hive's lib directory:
cp mysql-connector-java-5.1.47-bin.jar /usr/software/hive/apache-hive-2.3.4-bin/lib
Resolve the jline conflict:
cp jline-2.12.jar /usr/software/hadoop/hadoop-2.7.7/share/hadoop/yarn/lib
Initialize the metastore database (run on slave1):
schematool -dbType mysql -initSchema
Start the metastore service (run on slave1):
cd /usr/software/hive/apache-hive-2.3.4-bin
bin/hive --service metastore
Start the Hive client (run on master):
cd /usr/software/hive/apache-hive-2.3.4-bin
bin/hive
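bin/hive --service metastore keeps the terminal occupied, so it is often pushed into the background instead (a sketch, on slave1), with a quick check from master afterwards:
nohup bin/hive --service metastore > metastore.log 2>&1 &
# then, on master:
bin/hive -e "show databases;"    # should list at least the default database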