One-Click Big Data Environment Setup Script (continuously updated)

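The setup script below targets CentOS 7 (it installs the Aliyun Centos-7 repo and el7 MySQL packages) and must be run as root, since it writes under /etc and /opt. Save it under any name; the example invocations after the script assume setup.sh.
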
#!/bin/bash
 
#install frequently needed utilities; this list keeps growing
setup_software(){
	yum -y install vim
	yum -y install tree
	yum -y install lrzsz
}
 
#set the hostname and update /etc/hosts to match
modify_sysname(){
	hostnamectl set-hostname $1
	#only append the mapping if /etc/hosts does not already contain this IP/hostname pair
	cfg=`cat /etc/hosts | grep $2 | grep -wF $1`
	if [ "$cfg" == "" ];then
		#append the "IP hostname" mapping to /etc/hosts
		echo "$2 $1" >> /etc/hosts
	fi
}
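#example (hypothetical values): modify_sysname hadoop101 192.168.1.101 renames the
#host to hadoop101 and appends "192.168.1.101 hadoop101" to /etc/hosts if missing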
 
#switch the NIC to a static IP address
modify_staticip(){
	#skip if the ifcfg file has already been switched to static
	chk=`cat /etc/sysconfig/network-scripts/ifcfg-ens33 | grep static`
	if [ "$chk" == "" ];then
		#replace dhcp with static in /etc/sysconfig/network-scripts/ifcfg-ens33
		sed -i 's/dhcp/static/' /etc/sysconfig/network-scripts/ifcfg-ens33
		echo "IPADDR=$1" >> /etc/sysconfig/network-scripts/ifcfg-ens33
		echo "NETMASK=255.255.255.0" >> /etc/sysconfig/network-scripts/ifcfg-ens33
		echo "GATEWAY=${1%.*}.2" >> /etc/sysconfig/network-scripts/ifcfg-ens33
		echo "DNS1=114.114.114.114" >> /etc/sysconfig/network-scripts/ifcfg-ens33
		echo "DNS2=8.8.8.8" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	fi
	systemctl restart network
}
 
#stop and disable the firewall
close_firewalld(){
	systemctl stop firewalld
	systemctl disable firewalld 
}
 
#switch the yum repository to the Aliyun mirror
modify_yumsource() {
	#if a backup of the repo file exists, this step has already been done
	if [ -e /etc/yum.repos.d/CentOS-Base.repo_bak ];then
		echo "yum source already configured, nothing to do"
	else
		#wget is needed to fetch the mirror's repo file
		yum install -y wget
		#back up the stock repo file and download the Aliyun one in its place
		cd /etc/yum.repos.d/
		mv CentOS-Base.repo CentOS-Base.repo_bak
		wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
		yum clean all
		yum makecache
	fi
}
 
#ensure /opt/soft/$1 exists, creating it when missing
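#returns 0 when the folder already exists (installation is skipped) and 1 when it
#was just created, which callers read via $? to decide whether to install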
check_soft_folder(){
	if [ -e /opt/soft/$1 ];then
		echo "/opt/soft/$1 folder already exists"
		return 0
	else
		mkdir -p /opt/soft/$1
		return 1
	fi
}
 
#install the JDK; the tarball must already be in /opt
setup_jdk() {
	#skip if the JDK has already been installed
	check_soft_folder jdk180
	if [ $? == 1 ];then
		#look for the jdk tarball in /opt
		jdkName=`ls /opt/ | grep '^jdk-'`
		#unpack it into the target soft folder
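		#--strip-components 1 drops the tarball's top-level directory so the files
		#land directly in /opt/soft/jdk180 rather than a nested versioned folder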
		tar -zxvf /opt/$jdkName -C /opt/soft/jdk180 --strip-components 1
		#append Java environment variables to /etc/profile; the $ signs are escaped
		#so the literal variable names are written rather than expanded (empty) now
		echo "" >> /etc/profile
		echo "#java environment" >> /etc/profile
		echo "export JAVA_HOME=/opt/soft/jdk180" >> /etc/profile
		echo "export CLASSPATH=.:\${JAVA_HOME}/jre/lib/rt.jar:\${JAVA_HOME}/lib/dt.jar:\${JAVA_HOME}/lib/tools.jar" >> /etc/profile
		echo "export PATH=\$PATH:\${JAVA_HOME}/bin" >> /etc/profile
		#note: source only affects this script's shell; open a new shell elsewhere
		source /etc/profile
	fi
}
 
#install mysql 5.7
setup_mysql() {
	#if mariadb is still installed, mysql has not been set up on this box yet
	mdb=`rpm -qa | grep mariadb`
	if [ "$mdb" != "" ];then
		rpm -e --nodeps $mdb
		cd /opt/
		wget -c http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm
		yum -y install mysql57-community-release-el7-10.noarch.rpm
		yum -y install mysql-community-server
		#patch /etc/my.cnf to use utf8 so Chinese text is not garbled
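		#sed's "a" command appends the new line after each line matching "socket";
		#in the stock /etc/my.cnf that line sits in the [mysqld] section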
		sed -i '/socket/a character-set-server=utf8' /etc/my.cnf
		echo "[client]" >> /etc/my.cnf
		echo "default-character-set=utf8" >> /etc/my.cnf
		echo "[mysql]" >> /etc/my.cnf
		echo "default-character-set=utf8" >> /etc/my.cnf
		systemctl start  mysqld.service
		#pull the temporary root password out of the mysql log
		pwdinfo=`grep "password" /var/log/mysqld.log| grep -wF "temporary password"`
		passwd=${pwdinfo#*localhost:}
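		#re-expanding through echo without quotes trims the whitespace around the
		#extracted temporary password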
		passwd=$(echo $passwd)
		#relax the password policy and set the root password
		mysql -uroot -p$passwd --connect-expired-password -e "set global validate_password_policy=0"
		mysql -uroot -p$passwd --connect-expired-password -e "set global validate_password_length=1"
		mysql -uroot -p$passwd --connect-expired-password -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'root'"
		#allow root to log in remotely
		mysql -uroot -proot -e "GRANT ALL PRIVILEGES ON *.* TO root@'%' IDENTIFIED BY 'root'"
		mysql -uroot -proot -e "flush privileges"
		#restart the service
		systemctl restart mysqld.service
		#remove the mysql release repository package
		yum -y remove mysql57-community-release-el7-10.noarch
		#delete the downloaded rpm
		rm -rf /opt/mysql57-community-release-el7-10.noarch.rpm
	fi
}
 
# install hadoop
setup_hadoop(){
	#skip if hadoop has already been installed
	check_soft_folder hadoop260
	if [ $? == 1 ];then
		#look for the hadoop tarball in /opt
		hadoopName=`ls /opt/ | grep '^hadoop-'`
		#unpack it into the target soft folder
		tar -zxvf /opt/$hadoopName -C /opt/soft/hadoop260 --strip-components 1
		#point JAVA_HOME in hadoop-env.sh at the JDK's absolute path
		#('!' is used as the sed delimiter because the replacement contains '/')
		sed -i 's!export JAVA_HOME=${JAVA_HOME}!export JAVA_HOME=/opt/soft/jdk180!' /opt/soft/hadoop260/etc/hadoop/hadoop-env.sh
		#append the core properties inside <configuration> in core-site.xml
		sed -i '/<configuration>/a <property><name>fs.defaultFS</name><value>hdfs://192.168.1.101:9000</value></property><property><name>hadoop.tmp.dir</name><value>/opt/soft/hadoop260/tmp</value></property><property><name>hadoop.proxyuser.root.groups</name><value>*</value></property><property><name>hadoop.proxyuser.root.hosts</name><value>*</value></property><property><name>hadoop.proxyuser.root.users</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
		#set the replication factor in hdfs-site.xml
		sed -i '/<configuration>/a <property><name>dfs.replication</name><value>1</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
		#rename mapred-site.xml.template to mapred-site.xml, then edit it
		mv /opt/soft/hadoop260/etc/hadoop/mapred-site.xml.template /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
		sed -i '/<configuration>/a <property><name>mapreduce.framework.name</name><value>yarn</value></property>' /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
		#set the resourcemanager host and the shuffle service in yarn-site.xml
		sed -i '/<configuration>/a <property><name>yarn.resourcemanager.hostname</name><value>localhost</value></property><property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
		#append hadoop environment variables to /etc/profile (escaped $ as above)
		echo "" >> /etc/profile
		echo "#hadoop environment" >> /etc/profile
		echo "export HADOOP_HOME=/opt/soft/hadoop260" >> /etc/profile
		echo "export HADOOP_MAPRED_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_COMMON_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_HDFS_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export YARN_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native" >> /etc/profile
		echo "export PATH=\$PATH:\$HADOOP_HOME/sbin:\$HADOOP_HOME/bin" >> /etc/profile
		echo "export HADOOP_INSTALL=\$HADOOP_HOME" >> /etc/profile
		source /etc/profile
		#format the namenode
		hadoop namenode -format
		#generate an rsa key pair non-interactively (empty passphrase, default key path)
		ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
		#push the public key to the given machine for passwordless ssh
		ssh-copy-id $1
		#log in to the machine to verify the key works
		ssh $1
		#type exit in that session to return and let the script continue
		#exit
	fi
}
 
 
# install hive
setup_hive(){
	#skip if hive has already been installed
	check_soft_folder hive110
	if [ $? == 1 ];then
		#look for the hive tarball in /opt
		hiveName=`ls /opt/ | grep '^hive-'`
		#unpack it into the target soft folder
		tar -zxvf /opt/$hiveName -C /opt/soft/hive110 --strip-components 1
		#create hive-site.xml and populate it
		cat > /opt/soft/hive110/conf/hive-site.xml << 'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property><name>hive.metastore.warehouse.dir</name><value>/hive110/warehouse</value></property>
<property><name>hive.metastore.local</name><value>false</value></property>
<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://192.168.1.101:3306/hive?createDatabaseIfNotExist=true</value></property>
<property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.jdbc.Driver</value></property>
<property><name>javax.jdo.option.ConnectionUserName</name><value>root</value></property>
<property><name>javax.jdo.option.ConnectionPassword</name><value>root</value></property>
<property><name>hive.server2.authentication</name><value>NONE</value></property>
<property><name>hive.server2.thrift.client.user</name><value>root</value></property>
<property><name>hive.server2.thrift.client.password</name><value>root</value></property>
<property><name>hive.cli.print.header</name><value>false</value></property>
<property><name>hive.cli.print.current.db</name><value>true</value></property>
</configuration>
EOF
		#append hive environment variables to /etc/profile (escaped $ as above)
		echo "" >> /etc/profile
		echo "#hive environment" >> /etc/profile
		echo "export HIVE_HOME=/opt/soft/hive110" >> /etc/profile
		echo "export PATH=\$PATH:\$HIVE_HOME/bin" >> /etc/profile
		source /etc/profile
		#move the MySQL JDBC driver jar (pre-placed in /opt) into hive's lib directory
		mv /opt/mysql-connector-java-5.1.25.jar /opt/soft/hive110/lib
		#start hadoop
		sh /opt/soft/hadoop260/sbin/start-dfs.sh
		sh /opt/soft/hadoop260/sbin/start-yarn.sh
		#initialize hive's metastore schema
		schematool -dbType mysql -initSchema
	fi
}
 
# install zeppelin
setup_zeppelin(){
	#skip if zeppelin has already been installed
	check_soft_folder zeppelin081
	if [ $? == 1 ];then
		#look for the zeppelin tarball in /opt
		zeppelinName=`ls /opt/ | grep '^zeppelin-'`
		#unpack it into the target soft folder
		tar -zxvf /opt/$zeppelinName -C /opt/soft/zeppelin081 --strip-components 1
		#create zeppelin-site.xml from its template and register the helium repository
		cp /opt/soft/zeppelin081/conf/zeppelin-site.xml.template /opt/soft/zeppelin081/conf/zeppelin-site.xml
		sed -i '/<configuration>/a <property><name>zeppelin.helium.registry</name><value>helium</value></property>' /opt/soft/zeppelin081/conf/zeppelin-site.xml
		#create zeppelin-env.sh from its template and point it at the JDK and hadoop conf
		cp /opt/soft/zeppelin081/conf/zeppelin-env.sh.template /opt/soft/zeppelin081/conf/zeppelin-env.sh
		echo "export JAVA_HOME=/opt/soft/jdk180" >> /opt/soft/zeppelin081/conf/zeppelin-env.sh
		echo "export HADOOP_CONF_DIR=/opt/soft/hadoop260/etc/hadoop" >> /opt/soft/zeppelin081/conf/zeppelin-env.sh
		#append zeppelin environment variables to /etc/profile (escaped $ as above)
		echo "" >> /etc/profile
		echo "#zeppelin environment" >> /etc/profile
		echo "export ZEPPELIN_HOME=/opt/soft/zeppelin081" >> /etc/profile
		echo "export PATH=\$PATH:\$ZEPPELIN_HOME/bin" >> /etc/profile
		#reload /etc/profile in this shell
		source /etc/profile
		#copy hive's hive-site.xml into zeppelin081/conf so zeppelin can reach the metastore
		cp /opt/soft/hive110/conf/hive-site.xml /opt/soft/zeppelin081/conf
		#copy the two jars below into zeppelin's jdbc interpreter directory
		cp /opt/soft/hadoop260/share/hadoop/common/hadoop-common-2.6.0-cdh5.14.2.jar /opt/soft/zeppelin081/interpreter/jdbc/
		cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/zeppelin081/interpreter/jdbc/
	fi
}
 
 
#dispatch to the matching step based on the user's choice
custom_option() {
	case $1 in
		"0")
			setup_software
			;;
		"1")
			modify_sysname $2 $3
			;;
		"2")
			modify_staticip $3
			;;
		"3")
			close_firewalld
			;;
		"4")
			modify_yumsource
			;;
		"5")
			setup_jdk
			;;
		"6")
			setup_mysql
			;;
		"7")
			setup_hadoop $3
			;;
		"8")
			setup_hive $3
			;;
		"9")
			setup_zeppelin
			;;
		"10")
			modify_sysname $2 $3
			modify_staticip $3
			close_firewalld
			modify_yumsource
			setup_software
			setup_jdk
			setup_mysql
			setup_hadoop $3
			setup_hive $3
			setup_zeppelin
			;;
		*)
			echo "please option 0~10, setup all input 10"
	esac
}
 
#arguments: $1 = step number to run, $2 = hostname to set, $3 = this machine's IP address
custom_option $1 $2 $3
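
Example invocations, assuming the script above was saved as setup.sh (the hostname hadoop101 and the IP 192.168.1.101 are placeholders; substitute your own):

#set the hostname and static IP, then install the full stack
bash setup.sh 10 hadoop101 192.168.1.101
#install only the JDK (no hostname or IP needed)
bash setup.sh 5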

Big Data Environment Start/Stop Script (continuously updated)

#!/bin/bash
my_start(){
    if [  == "start" ]; then
        #start hadoop
        sh /opt/soft/hadoop260/sbin/start-dfs.sh
        sh /opt/soft/hadoop260/sbin/start-yarn.sh
        #start hive
        nohup /opt/soft/hive110/bin/hive --service hiveserver2 &
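        #nohup plus & keeps hiveserver2 alive in the background after the script exits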
        #start zeppelin
        sh /opt/soft/zeppelin081/bin/zeppelin-daemon.sh start
        echo "start over"
    else
        #close zeppelin
        sh /opt/soft/zeppelin081/bin/zeppelin-daemon.sh stop
        #close hive
        hiveprocess=`jps | grep RunJar | awk '{print $1}'`
        for no in $hiveprocess; do
            kill -9 $no
        done
        #stop hadoop
        sh /opt/soft/hadoop260/sbin/stop-dfs.sh
        sh /opt/soft/hadoop260/sbin/stop-yarn.sh
        echo "stop over"
    fi
}
my_start "$1"
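
Assuming the script above was saved as start.sh, usage looks like this (any argument other than start takes the stop branch):

sh start.sh start   #bring up HDFS, YARN, hiveserver2 and zeppelin
sh start.sh stop    #shut everything down in reverse order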
