As usual, let's start with the planning diagram.
1. First, analyze the resources
1) VIP
2) mysqld
3) NFS
Work out the start-up order between them: NFS must be started before mysqld.
2. NFS configuration
The partition behind the NFS shared directory is best built on LVM so that it can be grown on demand (a sketch follows).
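A minimal sketch of such an LVM-backed share, assuming a spare disk /dev/sdb (device and volume names are illustrative; the mount is done once /share has been created below):
#pvcreate /dev/sdb
#vgcreate vg_share /dev/sdb
#lvcreate -n lv_share -L 10G vg_share
#mkfs.ext3 /dev/vg_share/lv_share
#mount /dev/vg_share/lv_share /share
#lvextend -L +5G /dev/vg_share/lv_share && resize2fs /dev/vg_share/lv_share    # grow later when space runs low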
2.1 Installation
#yum -y install nfs-utils
2.2 Configuration
#mkdir /share
#vim /etc/exports
/share 172.16.98.1(rw,no_root_squash) 172.16.98.2(rw,no_root_squash)
#service nfs start
#groupadd -g 186 mysql
#useradd -u 186 -g mysql -s /sbin/nologin -M mysql
#chown mysql:mysql /share
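To confirm that the export is active (a quick sanity check, not part of the original writeup):
#exportfs -rv
#showmount -e 172.16.98.3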
3. Installing and testing MySQL 1 with NFS
3.1 Mount the NFS share
#mkdir /data
#chown mysql:mysql /data
#mount 172.16.98.3:/share /data
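A quick way to verify that the share is mounted (illustrative):
#mount | grep /data
#df -h /data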
3.2 Install MySQL from the binary tarball
#groupadd -g 186 mysql
#useradd -u 186 -g mysql -s /sbin/nologin -M mysql
* The mysql group and user must be created with the same uid and gid on all three machines.
# tar xf mysql-5.5.24-linux2.6-i686.tar.gz -C /usr/local
#cd /usr/local
#ln -s mysql-5.5.24-linux2.6-i686 mysql
#cd mysql
#chown -R mysql:mysql .
#scripts/mysql_install_db --user=mysql --datadir=/data
#chown -R root .
#cp support-files/my-large.cnf /etc/my.cnf
#cp support-files/mysql.server /etc/rc.d/init.d/mysqld
#chmod +x /etc/rc.d/init.d/mysqld
#vim /etc/profile
PATH=$PATH:/usr/local/mysql/bin
#export PATH=$PATH:/usr/local/mysql/bin
3.3 Edit the configuration file and start the service
#vim /etc/my.cnf
[mysqld]
thread_concurrency = 2
datadir=/data
#service mysqld start
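As a quick sanity check that the server came up and is really using the NFS-backed directory (a hypothetical check, not from the original post):
#mysql -e 'SHOW VARIABLES LIKE "datadir";'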
3.4 Installing MySQL on the other node
Follow the same steps as above.
One point worth noting:
for this MySQL installation there is no need to initialize MySQL again (the data directory on the NFS share is already initialized).
# tar xf mysql-5.5.24-linux2.6-i686.tar.gz -C /usr/local
#cd /usr/local
#ln -s mysql-5.5.24-linux2.6-i686 mysql
#cd mysql
#chown -R root .
#cp support-files/my-large.cnf /etc/my.cnf
#cp support-files/mysql.server /etc/rc.d/init.d/mysqld
#chmod +x /etc/rc.d/init.d/mysqld
#vim /etc/profile
PATH=$PATH:/usr/local/mysql/bin
#export PATH=$PATH:/usr/local/mysql/bin
#service mysqld start
#cd /data
3.5 Stop all resources
1) Stop the MySQL service
2) Unmount the NFS shared directory (both commands sketched below)
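Roughly, on the node that currently holds the resources (a sketch):
#service mysqld stop
#umount /data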
4. Installing Corosync
Preparation
1) Passwordless SSH trust between the two nodes, to simplify configuration
2) Clocks kept in sync
3) Hostnames set and mutually resolvable via /etc/hosts (a sketch of these steps follows the list)
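A sketch of these preparation steps, run on node1 and mirrored on node2, assuming node1 is 172.16.98.1 and node2 is 172.16.98.2 as in the exports file above (the NTP server address is illustrative):
#ssh-keygen -t rsa -P ''
#ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2.7ing.com
#ntpdate 172.16.0.1
#echo '172.16.98.1 node1.7ing.com node1' >> /etc/hosts
#echo '172.16.98.2 node2.7ing.com node2' >> /etc/hosts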
4.1 Install Corosync on both nodes
# yum install -y cluster-glue-1.0.6-1.6.el5.i386.rpm cluster-glue-libs-1.0.6-1.6.el5.i386.rpm corosynclib-1.2.7-1.1.el5.i386.rpm corosync-1.2.7-1.1.el5.i386.rpm heartbeat-3.0.3-2.3.el5.i386.rpm heartbeat-libs-3.0.3-2.3.el5.i386.rpm libesmtp-1.0.4-5.el5.i386.rpm pacemaker-cts-1.1.5-1.1.el5.i386.rpm pacemaker-libs-1.1.5-1.1.el5.i386.rpm pacemaker-1.1.5-1.1.el5.i386.rpm perl-TimeDate-1.16-5.el5.noarch.rpm resource-agents-1.0.4-1.1.el5.i386.rpm
4.2 Corosync configuration
1) On mysql1
#cd /etc/corosync
#cp corosync.conf.example corosync.conf
#vim corosync.conf
compatibility: whitetank
totem {
        version: 2
        secauth: on          # enable message authentication
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 172.16.0.0
                mcastaddr: 226.94.1.1
                mcastport: 5405
        }
}
logging {
        fileline: off
        to_stderr: on
        to_logfile: yes
        # to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}
amf {
        mode: disabled
}
service {
        ver: 0
        name: pacemaker
}
#corosync-keygen          # generates /etc/corosync/authkey
#scp authkey corosync.conf node2:/etc/corosync
Create the directory used for logs on both MySQL nodes:
#mkdir /var/log/cluster
4.3 Start Corosync from mysql1 and configure the resources
1) Start
#service corosync start
#ssh node2 'service corosync start'
#crm_mon
============
Last updated: Thu Aug 9 22:12:22 2012
Stack: openais
Current DC: node1.7ing.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============
Online: [ node2.7ing.com node1.7ing.com ]
2) Resource configuration
#crm
crm(live)#configure
crm(live)configure# primitive vip ocf:heartbeat:IPaddr params ip=172.16.99.1
crm(live)configure# primitive mysqld lsb:mysqld
crm(live)configure# primitive nfs ocf:heartbeat:Filesystem params device=172.16.98.3:/share directory=/data fstype=nfs op start timeout=60 op stop timeout=60
* For the nfs resource, the default operation timeout is 20s, which is below the recommended 60s, so the start/stop timeouts are set explicitly.
crm(live)configure# colocation vip_mysqld_nfs inf: mysqld nfs vip
crm(live)configure# order mysqld_after_nfs inf: nfs mysqld
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# verify
crm(live)configure# commit
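Optionally, review what was just committed before leaving the shell:
crm(live)configure# show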
3) Verification
#crm_mon
Last updated: Thu Aug 9 22:34:52 2012
Stack: openais
Current DC: node1.7ing.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
3 Resources configured.
============
Online: [ node2.7ing.com node1.7ing.com ]
nfs (ocf::heartbeat:Filesystem): Started node1.7ing.com
vip (ocf::heartbeat:IPaddr): Started node1.7ing.com
mysqld (lsb:mysqld): Started node1.7ing.com
#crm node standby
#crm_mon
============
Last updated: Thu Aug 9 22:36:18 2012
Stack: openais
Current DC: node1.7ing.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
3 Resources configured.
============
Node node1.7ing.com: standby
Online: [ node2.7ing.com ]
nfs (ocf::heartbeat:Filesystem): Started node2.7ing.com
vip (ocf::heartbeat:IPaddr): Started node2.7ing.com
mysqld (lsb:mysqld): Started node2.7ing.com
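To bring node1 back into the cluster after the standby test (a sketch):
#crm node online node1.7ing.com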
Base environment
MySQL host 1 (Ha1): 169.254.235.100
MySQL host 2 (Ha2): 169.254.235.101
Shared storage (Store): 169.254.235.102
Prerequisites
1. yum install nfs-utils -y (this automatically installs nfs-utils and rpcbind, the two packages required for NFS)
/etc/init.d/rpcbind start (rpcbind must be started first, otherwise nfs reports an error on startup)
/etc/init.d/nfs start
2. setenforce 0 (disable SELinux)
3. iptables rules on the server and the clients
e.g. server-side rules:
iptables -I INPUT -s 169.254.235.100 -j ACCEPT
iptables -I INPUT -s 169.254.235.101 -j ACCEPT
service iptables save
service iptables restart
4. Create the required user and directories
User: the uid and gid must be identical on the server and the clients (here mysql is created on all of them)
[root@NFS rsyncd]# groupadd -g 502 mysql
[root@NFS rsyncd]# useradd -u 502 -g mysql -G mysql mysql
[root@NFS rsyncd]# id mysql
uid=502(mysql) gid=502(mysql) groups=502(mysql)
Directories: on the server, create the directory to be shared, /mysql_share
On the server and the clients, create the mount-point directory /data
Change the owner of both directories to mysql (commands sketched below)
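Putting step 4 into commands (a sketch; /data must also be created and chowned on both MySQL hosts):
mkdir /mysql_share /data
chown mysql:mysql /mysql_share /data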
Configuring NFS
Server side
echo '/mysql_share 169.254.235.100(rw,no_root_squash) 169.254.235.101(rw,no_root_squash)' >>/etc/exports
Reload the configuration:
exportfs -rv
######################################################
Common errors:
1. The export list does not include the server's own IP, so do not try to mount the share on the server itself to test it.
(Trying to do so returns the error: access denied by server while mounting....)
2. Write each export on a single line; do not wrap it, or you will get errors.
e.g.
[root@NFS rsyncd]# vi /etc/exports
/data
192.168.40.100(rw,no_root_squash)
192.168.40.101(rw,no_root_squash)
Re-exporting then reports errors:
[root@NFS rsyncd]# exportfs -rv
exportfs: No options for /data : suggest (sync) to avoid warning
exportfs: No options for 192.168.40.100(rw,no_root_squash) : suggest (sync) to avoid warning
exportfs: No options for 192.168.40.101(rw,no_root_squash) : suggest (sync) to avoid warning
exporting :192.168.40.101(rw,no_root_squash)
exportfs: Failed to stat 192.168.40.101(rw,no_root_squash): No such file or directory
exporting :192.168.40.100(rw,no_root_squash)
exportfs: Failed to stat 192.168.40.100(rw,no_root_squash): No such file or directory
exporting :/data
After the fix:
[root@NFS rsyncd]# cat /etc/exports
/data 192.168.40.100(rw,no_root_squash) 192.168.40.101(rw,no_root_squash)
[root@NFS rsyncd]# exportfs -rv
exporting 192.168.40.100:/data
exporting 192.168.40.101:/data
3. The client being configured does not have the NFS components installed
[root@Master02 /]# mount -t nfs 192.168.40.110:/data /data
mount: wrong fs type, bad option, bad superblock on 192.168.40.110:/data,
       missing codepage or helper program, or other error
       (for several filesystems (e.g. nfs, cifs) you might
       need a /sbin/mount.<type> helper program)
       In some cases useful info is found in syslog - try
       dmesg | tail or so
Following the hint "need a /sbin/mount.<type> helper program",
we find that /sbin/mount.nfs does not exist.
Install it with yum install -y nfs-utils,
after which /sbin/mount.nfs is present.
The clients do not need to start the rpcbind or nfs services; once the package is installed they can simply mount the share.
######################################################
Client side
Mount the share directly on both clients:
mount -t nfs 169.254.235.102:/mysql_share /data
Afterwards, showmount -e 169.254.235.102 shows the export status,
and df -h lists the NFS-mounted filesystem among the disks.
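To make the mount survive a reboot, an /etc/fstab entry like the following could be added (illustrative, not part of the original setup):
echo '169.254.235.102:/mysql_share /data nfs defaults,_netdev 0 0' >>/etc/fstab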
--------------------------------------------------
At this point the NFS environment is complete.
I had assumed that pointing MySQL's datadir at the mounted shared disk would be the end of it, but after reading further it turned out that the two MySQL hosts also need heartbeat or corosync for cluster heartbeat configuration.
In my experiments the pacemaker and heartbeat configuration kept throwing errors; and since NFS+MySQL is rarely used for MySQL high availability nowadays, further experiments were shelved at this point.
For further practice see: 小型架构实践--Mysql双主+corosync+NFS (small-scale architecture practice: MySQL dual-master + corosync + NFS).
I. Setup
Environment:
CentOS7
VMware
The author set up four virtual machines:
K8S-Master node: 3 GB RAM, 2 CPU cores, 20 GB disk
K8S-node1 node: 2 GB RAM, 2 CPU cores, 30 GB disk
K8S-node2 node: 2 GB RAM, 2 CPU cores, 30 GB disk
Image-registry node: 2 GB RAM, 2 CPU cores, 50 GB disk
II. Node planning
Three virtual machines form the K8S cluster and one virtual machine hosts the image registry.
Each VM has two NICs: one in "NAT mode", used for pulling images and similar tasks,
and one in "host-only mode", used for communication between cluster nodes. The plan is as follows:
K8s-master node:
Host-only: 10.10.10.200
NAT: 192.168.200.130
K8S-node1 node:
Host-only: 10.10.10.201
NAT: 192.168.200.131
K8S-node2 node:
Host-only: 10.10.10.202
NAT: 192.168.200.132
Image-registry node:
Host-only: 10.10.10.101
NAT: 192.168.200.150
III. Version information
Linux kernel version:
Linux version 3.10.0-862.el7.x86_64 (builder@kbuilder.dev.centos.org)
(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) )
#1 SMP Fri Apr 20 16:44:24 UTC 2018
The K8s cluster version is 1.15.0.
IV. MySQL persistent storage experiment with StatefulSet and PV/PVC
1. Install the NFS service on every node
On the "image registry" node, run:
yum install -y nfs-common nfs-utils rpcbind
On the K8s cluster nodes, run:
yum install -y nfs-utils rpcbind
2. Configure the NFS server on the "image registry" node
mkdir /nfs_mysql
chmod 777 /nfs_mysql/
(In this test environment 777 is granted temporarily so that ownership can be ignored; this is not recommended in production.)
chown nfsnobody /nfs_mysql/
echo "/nfs_mysql *(rw,no_root_squash,no_all_squash,sync)" >>/etc/exports
cat /etc/exports
/nfs_mysql *(rw,no_root_squash,no_all_squash,sync)
systemctl start rpcbind
systemctl start nfs
3. Test that the NFS service works
mkdir /test
showmount -e 10.10.10.101
The output shows that /nfs_mysql * is exported; next, test whether it can be mounted.
On the master node, run:
mount -t nfs 10.10.10.101:/nfs_mysql /test/
echo "hello-world">>/test/1.txt
On the image-registry node, check whether 1.txt exists; if it does, the mount succeeded.
The NFS service works, so now remove the test directory and 1.txt.
On the image-registry node:
[root@hub nfs_mysql]# rm -f 1.txt
On the master node:
[root@k8s-master ~]# umount /test/
[root@k8s-master ~]# rm -rf /test/
In the same way, following the steps above, also create the directories below (to provide mounts for multiple MySQL replicas; a command sketch follows the list):
nfs_mysql1
nfs_mysql2
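For example, mirroring the commands used for /nfs_mysql above:
mkdir /nfs_mysql1 /nfs_mysql2
chmod 777 /nfs_mysql1 /nfs_mysql2
chown nfsnobody /nfs_mysql1 /nfs_mysql2
echo "/nfs_mysql1 *(rw,no_root_squash,no_all_squash,sync)" >>/etc/exports
echo "/nfs_mysql2 *(rw,no_root_squash,no_all_squash,sync)" >>/etc/exports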
When done, restart the NFS services:
systemctl restart rpcbind
systemctl restart nfs
Final result:
4. Wrap the NFS exports as PVs
Create a mysql_test directory and keep all the YAML files in it:
mkdir mysql_test
cd mysql_test
vim mysql-pv.yml
mysql-pv.yml is configured as follows:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs_mysql
    server: 10.10.10.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv1
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs_mysql1
    server: 10.10.10.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv2
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs_mysql2
    server: 10.10.10.101
Note:
In K8s cluster version 1.15 the Recycle reclaim policy has been dropped; only the Retain or Delete policies can be used. Here we use persistentVolumeReclaimPolicy: Retain.
Run:
kubectl create -f mysql-pv.yml
kubectl get pv
As shown in the figure, the PVs were created successfully.
5. Deploy MySQL. In the mysql_test directory, write mysql.yml with the following contents:
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: "mysql"
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.6
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
  volumeClaimTemplates:
  - metadata:
      name: mysql-persistent-storage
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: "nfs"
      resources:
        requests:
          storage: 1Gi
Run the following command to deploy the MySQL service:
kubectl create -f mysql.yml
As the figure shows, the StatefulSet created mysql-0, mysql-1 and mysql-2 in order.
Check which node each Pod is scheduled on (for example, see below):
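For example (the label app=mysql comes from the manifest above):
kubectl get pods -l app=mysql -o wide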
6. Create a temporary container and use the MySQL client to send test requests to the MySQL master node
Note:
The hostname is mysql-0.mysql; across namespaces, use mysql-0.mysql.[NAMESPACE_NAME]. If no namespace is specified, the default is default, i.e. mysql-0.mysql.default.
Here the author plans to shut down node2 to simulate a node2 outage and test whether the data is really persisted,
so we write data to mysql-1, which runs on node2.
Run the following command to connect to mysql-1:
kubectl run mysql-client --image=mysql:5.6 -it --rm --restart=Never -- mysql -h mysql-1.mysql.default -ppassword
Create a database demo and insert hello-world into the messages table:
CREATE DATABASE demo;
CREATE TABLE demo.messages (message VARCHAR(250));
INSERT INTO demo.messages VALUES ('hello-world');
As shown in the figure.
Next, shut down the k8s-node2 VM to simulate an outage.
Checking the node status shows that node2 has changed to NotReady.
After a while, K8s migrates the Pod mysql-1 to node k8s-node1.
Because this took too long, the author deleted all three Pods, let them restart, and then verified the data (a sketch of the check follows):
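One way to re-check the data once the Pods are Running again (a sketch, mirroring the earlier client command):
kubectl run mysql-client --image=mysql:5.6 -it --rm --restart=Never -- mysql -h mysql-1.mysql.default -ppassword -e "SELECT * FROM demo.messages;"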
The MySQL service recovered and the data was intact!