Percona XtraDB Cluster is an active/active, highly available and highly scalable open source clustering solution for MySQL. It integrates Percona Server and Percona XtraBackup with the Galera library in a single package, enabling you to build a cost-effective MySQL high availability cluster.
This guide walks through building a 3-node MySQL cluster using Percona XtraDB Cluster. The infrastructure is as follows.
All nodes run CentOS 6.6. The IP addresses / hostnames of the nodes are listed below. Turn off iptables on all of these nodes, or allow the cluster traffic between them (see the commands after the list).
192.168.1.1 cluster-node1
192.168.1.2 cluster-node2
192.168.1.3 cluster-node3
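For a quick lab setup you can simply disable the firewall on every node; on a shared network you should instead open only the ports PXC uses: 3306 (MySQL), 4567 (Galera group communication), 4568 (IST) and 4444 (SST).
# service iptables stop
# chkconfig iptables off
Alternatively, allow the cluster ports explicitly (a sketch for one peer; repeat for each peer node):
# iptables -I INPUT -s 192.168.1.2 -p tcp -m multiport --dports 3306,4444,4567,4568 -j ACCEPT
# service iptables save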
Add these entries to the /etc/hosts file on every node.
#vim /etc/hosts
192.168.1.1 cluster-node1
192.168.1.2 cluster-node2
192.168.1.3 cluster-node3
Disable SELinux.
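To disable SELinux immediately and keep it disabled across reboots:
# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config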
Set up SSH keys and place them into authorized_keys so that every node's id_rsa.pub is present in authorized_keys on all servers.
# ssh-keygen -t rsa
# cd /root/.ssh/
# cp id_rsa.pub authorized_keys
# chmod 600 /root/.ssh/authorized_keys
# chmod 700 /root/.ssh/
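One way to distribute each node's public key to its peers (assuming root SSH logins are permitted) is ssh-copy-id; run the equivalent on every node for the other two:
# ssh-copy-id root@cluster-node2
# ssh-copy-id root@cluster-node3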
Install the needed repositories for Percona, plus socat (the XtraBackup SST method uses socat to stream data between nodes).
# yum -y install http://www.percona.com/downloads/percona-release/percona-release-0.0-1.x86_64.rpm
# yum -y install http://mirror.pnl.gov/epel/6/x86_64/epel-release-6-8.noarch.rpm
# wget -O /etc/pki/rpm-gpg/RPM-GPG-KEY-percona http://www.percona.com/downloads/RPM-GPG-KEY-percona
# yum -y install socat
Remove the following packages to avoid dependency conflicts; we will reinstall them later.
# rpm -e mysql-libs postfix cronie redhat-lsb-core redhat-lsb-printing redhat-lsb-graphics libcgroup numad redhat-lsb sysstat crontabs cronie-anacron redhat-lsb-compat
Install the Percona XtraDB Cluster packages (run the following command on all the nodes):
# yum -y install Percona-XtraDB-Cluster-full-56
Start the server on all three nodes.
[root@cluster-node1 ~]# /etc/init.d/mysql start
Starting MySQL (Percona XtraDB Cluster)......... SUCCESS!
[root@cluster-node2 ~]# /etc/init.d/mysql start
Starting MySQL (Percona XtraDB Cluster)......... SUCCESS!
[root@cluster-node3 ~]# /etc/init.d/mysql start
Starting MySQL (Percona XtraDB Cluster)......... SUCCESS!
Run the following commands to register the checksum UDFs that ship with Percona Server (they are used by tools such as pt-table-checksum):
# mysql -e "CREATE FUNCTION fnv1a_64 RETURNS INTEGER SONAME 'libfnv1a_udf.so'"
# mysql -e "CREATE FUNCTION fnv_64 RETURNS INTEGER SONAME 'libfnv_udf.so'"
# mysql -e "CREATE FUNCTION murmur_hash RETURNS INTEGER SONAME 'libmurmur_udf.so'"
Bring back the packages removed earlier:
#yum -y install postfix cronie redhat-lsb-core redhat-lsb-printing redhat-lsb-graphics libcgroup numad redhat-lsb sysstat crontabs cronie-anacron redhat-lsb-compat
Create the my.cnf file on the first node and insert the following contents:
#vim /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
user=mysql
# Path to Galera library
wsrep_provider=/usr/lib64/libgalera_smm.so
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
# cluster-node1 192.168.1.1
# cluster-node2 192.168.1.2
# cluster-node3 192.168.1.3
wsrep_cluster_address=gcomm://192.168.1.1,192.168.1.2,192.168.1.3
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW
# MyISAM storage engine has only experimental support
default_storage_engine=InnoDB
# This changes how InnoDB auto increment locks are managed and is a requirement for Galera
innodb_autoinc_lock_mode=2
# Node #1 address
wsrep_node_address=192.168.1.1
# SST method (xtrabackup and rsync_wan are alternatives; mysqldump is slow)
wsrep_sst_method=rsync
# wsrep_sst_method=xtrabackup
# wsrep_sst_method=rsync_wan
# wsrep_sst_method=mysqldump
# Cluster name
wsrep_cluster_name=percona_cluster
# Authentication for SST method
wsrep_sst_auth="root:password"
# server_id (derived from the node IP: SELECT INET_ATON('192.168.1.1'))
server_id=3232235777
[client]
socket=/var/lib/mysql/mysql.sock
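Note: with the rsync SST method wsrep_sst_auth is not actually used; it matters for the xtrabackup and mysqldump methods. If you switch to xtrabackup, make sure the user named in wsrep_sst_auth exists with sufficient privileges, for example (user name and password here are illustrative, adjust to your environment):
mysql> GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost' IDENTIFIED BY 's3cret';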
Bootstrap the first node of the cluster (the empty gcomm:// address tells it to form a new cluster rather than join one):
# /etc/init.d/mysql start --wsrep-cluster-address="gcomm://"
Starting MySQL (Percona XtraDB Cluster)...................................... SUCCESS!
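On recent PXC 5.6 init scripts the same thing can be done with the dedicated bootstrap action:
# /etc/init.d/mysql bootstrap-pxc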
Read the grastate.dat file (it is in /var/lib/mysql). A seqno of -1 is normal while the server is running; a real sequence number is only written on clean shutdown.
[root@cluster-node1 mysql]# cat grastate.dat
# GALERA saved state
version: 2.1
uuid: 68aa64c3-a966-11e4-96dd-b6bab6cb5af6
seqno: -1
cert_index:
Check the cluster status. With only the first node running, the cluster size is 1.
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node1
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 1
1 row in set (0.00 sec)
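Beyond the size, a couple of other wsrep status variables are worth checking; on a healthy node wsrep_cluster_status should be 'Primary' and wsrep_local_state_comment should be 'Synced':
# mysql -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_status'; SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';"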
Now copy the following contents into the /etc/my.cnf file of the second node:
#vim /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
user=mysql
# Path to Galera library
wsrep_provider=/usr/lib64/libgalera_smm.so
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
# cluster-node1 192.168.1.1
# cluster-node2 192.168.1.2
# cluster-node3 192.168.1.3
wsrep_cluster_address=gcomm://192.168.1.1,192.168.1.2,192.168.1.3
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW
# MyISAM storage engine has only experimental support
default_storage_engine=InnoDB
# This changes how InnoDB auto increment locks are managed and is a requirement for Galera
innodb_autoinc_lock_mode=2
# Node #2 address
wsrep_node_address=192.168.1.2
# SST method (xtrabackup and rsync_wan are alternatives; mysqldump is slow)
wsrep_sst_method=rsync
# wsrep_sst_method=xtrabackup
# wsrep_sst_method=rsync_wan
# wsrep_sst_method=mysqldump
# Cluster name
wsrep_cluster_name=percona_cluster
# Authentication for SST method
wsrep_sst_auth="root:password"
# server_id (derived from the node IP: SELECT INET_ATON('192.168.1.2'))
server_id=3232235778
[client]
socket=/var/lib/mysql/mysql.sock
Start MySQL on the second node; it joins the running cluster and receives an SST from the first node.
[root@cluster-node2]#/etc/init.d/mysql start
Starting MySQL (Percona XtraDB Cluster)........................... SUCCESS!
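If the join takes a while, the SST progress can be followed in the MySQL error log on the joiner (the default log location for this setup; adjust if you configured log-error):
# tail -f /var/lib/mysql/cluster-node2.err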
Now check the status on both nodes.
First Node
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node1
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 2
1 row in set (0.00 sec)
Second Node
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node2
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 2
1 row in set (0.00 sec)
Now we add the third node to the cluster.
Now copy the following contents into the /etc/my.cnf file of the third node:
#vim /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
user=mysql
# Path to Galera library
wsrep_provider=/usr/lib64/libgalera_smm.so
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
# cluster-node1 192.168.1.1
# cluster-node2 192.168.1.2
# cluster-node3 192.168.1.3
wsrep_cluster_address=gcomm://192.168.1.1,192.168.1.2,192.168.1.3
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW
# MyISAM storage engine has only experimental support
default_storage_engine=InnoDB
# This changes how InnoDB auto increment locks are managed and is a requirement for Galera
innodb_autoinc_lock_mode=2
# Node #3 address
wsrep_node_address=192.168.1.3
# SST method (xtrabackup and rsync_wan are alternatives; mysqldump is slow)
wsrep_sst_method=rsync
# wsrep_sst_method=xtrabackup
# wsrep_sst_method=rsync_wan
# wsrep_sst_method=mysqldump
# Cluster name
wsrep_cluster_name=percona_cluster
# Authentication for SST method
wsrep_sst_auth="root:password"
# server_id (derived from the node IP: SELECT INET_ATON('192.168.1.3'))
server_id=3232235779
[client]
socket=/var/lib/mysql/mysql.sock
Start MySQL on the third node.
[root@cluster-node3]# /etc/init.d/mysql start
Starting MySQL (Percona XtraDB Cluster)........................... SUCCESS!
Now check the cluster status on each node.
First Node
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node1
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 3
1 row in set (0.00 sec)
Second Node
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node2
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 3
1 row in set (0.00 sec)
Third Node
mysql> select @@hostname\G show global status like 'wsrep_cluster_size' \G
*************************** 1. row ***************************
@@hostname: cluster-node3
1 row in set (0.00 sec)
*************************** 1. row ***************************
Variable_name: wsrep_cluster_size
Value: 3
1 row in set (0.00 sec)
You can see that the value has changed to 3; all three nodes have joined the cluster.
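You can also confirm the membership list; wsrep_incoming_addresses shows the client addresses of every node currently in the cluster:
# mysql -e "SHOW GLOBAL STATUS LIKE 'wsrep_incoming_addresses'"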
Testing.
Create a test database on the first node; you will see it replicated to the other nodes, as in the quick check below.
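A minimal replication check (the database and table names are illustrative): create a table and insert a row on the first node, then read it back on the second.
On cluster-node1:
mysql> CREATE DATABASE percona_test;
mysql> CREATE TABLE percona_test.t1 (id INT PRIMARY KEY, note VARCHAR(50));
mysql> INSERT INTO percona_test.t1 VALUES (1, 'replicated?');
On cluster-node2:
mysql> SELECT * FROM percona_test.t1;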