#格式化磁盘,创建相应的挂载目录
mkfs.xfs -i size=512 /dev/sdb1
mkdir -p /gfs/test1
#将挂载配置信息写入fstab配置文件,以便重启自动挂载
vi /etc/fstab
/dev/sdb1 /gfs/test1 xfs defaults 1 2
#加载修改的配置信息
mount -a && mount
Note: 在CentOS 6操作系统上,需要先安装xfs文件系统工具:
yum install xfsprogs
3、安装、配置glusterd服务
yum install glusterfs-server
启动GlusterFS管理进程
#加入开机启动
systemctl enable glusterd
ln -s '/usr/lib/systemd/system/glusterd.service' '/etc/systemd/system/multi-user.target.wants/glusterd.service'
#启动glusterd
systemctl start glusterd
#查看glusterd状态信息
systemctl status glusterd
● glusterd.service - GlusterFS, a clustered file-system server
   Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2018-11-15 12:08:54 EST; 15s ago
  Process: 2808 ExecStart=/usr/sbin/glusterd -p /var/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2810 (glusterd)
    Tasks: 8
   CGroup: /system.slice/glusterd.service
           └─2810 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
Nov 15 12:08:53 node1 systemd[1]: Starting GlusterFS, a clustered file-system server...
Nov 15 12:08:54 node1 systemd[1]: Started GlusterFS, a clustered file-system server.
[root@node1 ~]# gluster peer probe node2
peer probe: failed: Probe returned with Transport endpoint is not connected
node2操作,将node1添加到受信任池
[root@node2 ~]# gluster peer probe node1
peer probe: success. Host node1 port 24007 already in peer list
6、建立GlusterFS volume
node1和node2上均执行:
mkdir /gfs/test1/gv0
在任意一个节点上执行即可,不需要重复执行:
# gluster volume create gv0 replica 2 node1:/gfs/test1/gv0 node2:/gfs/test1/gv0
[root@node1 ~]# gluster volume create gv0 replica 2 node1:/gfs/test1/gv0 node2:/gfs/test1/gv0
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue? (y/n) y
volume create: gv0: success: please start the volume to access data
# gluster volume info
[root@node1 ~]# gluster volume info

Volume Name: gv0
Type: Replicate
Volume ID: 79c81f10-0cb8-4f26-a7ab-d21fe19f0bbf
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv0
Brick2: node2:/gfs/test1/gv0
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
如果卷或glusterd没有正常启动,可以查看 /var/log/glusterfs 目录下的日志进行排查。
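一个简单的排查示例(日志文件名以实际环境为准,这里的glusterd.log仅作示意):
# 查看glusterd主日志最近的报错信息
tail -n 50 /var/log/glusterfs/glusterd.log | grep -iE "error|failed"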
7、测试Glusterfs volume gv0副本集是否生效
#挂载到任意空目录
mount -t glusterfs node1:/gv0 /mnt
#创建测试数据
for i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
#查看生成数据的数量
ls /mnt | wc -l
#在node1和node2的/gfs/test1/gv0目录下均生成了100个文件
ls /gfs/test1/gv0 | wc -l
[root@node1 ~]# gluster volume create gv1 disperse 2 node1:/gfs/test1/gv1 node2:/gfs/test1/gv1
disperse count must be greater than 2
disperse option given twice
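报错说明disperse(纠删码)卷的brick数量必须大于2。下面是一个修正后的示意命令(假设node3也已在受信任池中,路径仅作示意;brick较少时gluster可能会提示该配置并非最优,需确认继续):
# disperse卷至少需要3个brick,redundancy表示可容忍损坏的brick数
gluster volume create gv1 disperse 3 redundancy 1 node1:/gfs/test1/gv1 node2:/gfs/test1/gv1 node3:/gfs/test1/gv1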
# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4 node4:/gfs/test1/gv4
[root@node1 test1]# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue? (y/n) y
number of bricks is not a multiple of replica count
#创建副本卷时,brick的数量必须是副本数的整数倍
[root@node1 test1]# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4 node4:/gfs/test1/gv4
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue? (y/n) y
volume create: gv4: success: please start the volume to access data
[root@node1 test1]# gluster volume start gv4
[root@node1 test1]# gluster volume info gv4

Volume Name: gv4
Type: Distributed-Replicate
Volume ID: e8556b2e-462d-4407-99c4-a6e622754e6c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv4
Brick2: node2:/gfs/test1/gv4
Brick3: node3:/gfs/test1/gv4
Brick4: node4:/gfs/test1/gv4
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@node1 test1]# gluster volume create gv5 replica 2 arbiter 2 transport tcp node1:/gfs/test1/gv5 node2:/gfs/test1/gv5 node3:/gfs/test1/gv5
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue? (y/n) y
For arbiter configuration, replica count must be 3 and arbiter count must be 1. The 3rd brick of the replica will be the arbiter
#提示:仲裁(arbiter)配置必须为3副本、1仲裁
[root@node1 test1]# gluster volume create gv5 replica 3 arbiter 1 transport tcp node1:/gfs/test1/gv5 node2:/gfs/test1/gv5 node3:/gfs/test1/gv5
#启动gv5
[root@node1 test1]# gluster volume start gv5
[root@node1 test1]# gluster volume info gv5

Volume Name: gv5
Type: Replicate
Volume ID: fd4fca20-1bb3-480b-9c24-703dd3e8b508
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (2 + 1) = 3
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv5
Brick2: node2:/gfs/test1/gv5
Brick3: node3:/gfs/test1/gv5 (arbiter)
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
gluster volume info gv5

Volume Name: gv5
Type: Replicate
Volume ID: e12e23f5-7347-4049-8d77-53cef76b0633
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (2 + 1) = 3
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv5
Brick2: node2:/gfs/test1/gv5
Brick3: node3:/gfs/test1/gv5 (arbiter)
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
#查看所有卷的信息:
gluster volume info all
#查看卷状态:
gluster volume status [all|<VOLNAME> [<BRICK>]] [detail|clients|mem|inode|fd|callpool]
#显示所有卷的状态
gluster volume status all
#显示卷的额外信息:
gluster volume status gv5 detail
#显示客户端列表:
gluster volume status gv5 clients
#显示内存使用情况:
gluster volume status gv5 mem
#显示卷的inode表
gluster volume status gv5 inode
#显示卷打开的fd表
gluster volume status gv5 fd
#显示卷的挂起调用
gluster volume status gv5 callpool
[root@localhost vsftpd]# more /etc/vsftpd/chroot_list |grep f1
f1
[root@localhost vsftpd]# more /etc/vsftpd/vsftpd.user_list |grep f1
f1
8、重新启动vsftpd
[root@localhost ~]# systemctl restart vsftpd
9、测试用户能否正常登录:
[root@localhost surgeftp_2.3f2_linux64]# ./sslftp 192.168.168.120
Connected to 192.168.168.120
220 (vsFTPd 3.0.2)
234 Proceed with negotiation.
starting SSL/TLS sslinit 3
Negotiated secure protocol TLSv1.2, using an AES cipher.
200 PBSZ set to 0.
200 PROT now Private.
(secure) User: f1
331 Please specify the password.
(secure) Password: ******
Connection problem SSLTCP:525:ssl_read tcp:-1000:SSL failure. (SSL_ERROR_SSL):error:1408F10B:SSL routines:SSL3_GET_RECORD:wrong version number
Channel open, login Failed!
[root@localhost surgeftp_2.3f2_linux64]# ./sslftp 192.168.168.120
Connected to 192.168.168.120
220 (vsFTPd 3.0.2)
234 Proceed with negotiation.
starting SSL/TLS sslinit 3
Negotiated secure protocol TLSv1.2, using an AES cipher.
200 PBSZ set to 0.
200 PROT now Private.
(secure) User: f1
331 Please specify the password.
(secure) Password: ******
230 Login successful.
Type in "save" to save login details to /root/.netrc
sslftp> ls
226 Directory send OK.
sslftp> exit
221 Goodbye.
Channel Closed.
10、新增f2用户,修改vsftpd_users.conf配置文件,添加f2用户
[root@localhost ~]# vim /etc/vsftpd/vsftpd_users.conf
#奇数行为账号,偶数行为密码
f1
123456
f2
123456
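如果该环境使用的是pam_userdb虚拟用户方式,修改明文用户文件后通常还需要重新生成Berkeley DB文件并重启vsftpd才会生效。以下为一个示意(db文件名与路径需与实际PAM配置一致,这里仅作假设):
cd /etc/vsftpd
# 根据明文用户文件重新生成hash格式的db文件
db_load -T -t hash -f vsftpd_users.conf vsftpd_users.db
chmod 600 vsftpd_users.db
systemctl restart vsftpd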
[root@localhost vsftpd]# more /etc/vsftpd/chroot_list |grep -E "f1|f2"
f1
f2
[root@localhost vsftpd]# more /etc/vsftpd/vsftpd.user_list |grep -E "f1|f2"
f1
f2
[root@localhost surgeftp_2.3f2_linux64]# ./sslftp 192.168.168.120
Connected to 192.168.168.120
220 (vsFTPd 3.0.2)
234 Proceed with negotiation.
starting SSL/TLS sslinit 3
Negotiated secure protocol TLSv1.2, using an AES cipher.
200 PBSZ set to 0.
200 PROT now Private.
(secure) User: f2
331 Please specify the password.
(secure) Password: ******
230 Login successful.
Type in "save" to save login details to /root/.netrc
sslftp>
zone "gfs.com" IN {
        type slave;
        masters { 10.0.30.95; };
        allow-notify { 10.0.30.95; };
        file "slaves/node.conf";
};
zone "30.0.10.in-addr.arpa" IN {
        type slave;
        masters { 10.0.30.95; };
        allow-notify { 10.0.30.95; };
        file "slaves/node.txt";
};
13、从服务器启动dns服务
systemctl start named.service
14、查看/var/named/slaves目录下已经多出了node.*的两个文件
[root@node3 slaves]# pwd
/var/named/slaves
[root@node3 slaves]# ll
total 8
-rw-r--r--. 1 named named 416 Nov 22 16:12 node.conf
-rw-r--r--. 1 named named 514 Nov 22 16:12 node.txt
15、测试主从文件能否正常同步,修改主节点node.*配置文件
[root@node4 named]# vim node.conf
[root@node4 named]# vim node.txt
[root@node4 named]# ll node.*
-rw-r-----. 1 named root 265 Nov 22 18:48 node.conf
-rw-r-----. 1 named root 272 Nov 22 18:48 node.txt
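需要注意,修改区域文件后要递增SOA序列号,从节点才会触发区域传送(下方日志中的 serial 3 即修改后的序列号)。SOA部分大致形如下例(主机名、各项取值均为示意,以实际区域文件为准):
@   IN  SOA  node4.gfs.com. admin.gfs.com. (
             3        ; serial,每次修改区域文件后递增
             3600     ; refresh
             900      ; retry
             604800   ; expire
             86400 )  ; minimum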
Nov 22 18:48:55 node3 named[98442]: client 10.0.30.95#52505: received notify for zone 'gfs.com'
Nov 22 18:48:55 node3 named[98442]: zone gfs.com/IN: Transfer started.
Nov 22 18:48:55 node3 named[98442]: transfer of 'gfs.com/IN' from 10.0.30.95#53: connected using 10.0.30.117#58148
Nov 22 18:48:55 node3 named[98442]: zone gfs.com/IN: transferred serial 3
Nov 22 18:48:55 node3 named[98442]: transfer of 'gfs.com/IN' from 10.0.30.95#53: Transfer completed: 1 messages, 9 records, 252 bytes, 0.008 secs (31500 bytes/sec)
Nov 22 18:48:55 node3 named[98442]: zone gfs.com/IN: sending notifies (serial 3)
Nov 22 18:48:55 node3 named[98442]: client 10.0.30.95#54023: received notify for zone '30.0.10.in-addr.arpa'
Nov 22 18:48:55 node3 named[98442]: zone 30.0.10.in-addr.arpa/IN: Transfer started.
Nov 22 18:48:55 node3 named[98442]: transfer of '30.0.10.in-addr.arpa/IN' from 10.0.30.95#53: connected using 10.0.30.117#51530
Nov 22 18:48:55 node3 named[98442]: zone 30.0.10.in-addr.arpa/IN: transferred serial 3
Nov 22 18:48:55 node3 named[98442]: transfer of '30.0.10.in-addr.arpa/IN' from 10.0.30.95#53: Transfer completed: 1 messages, 8 records, 249 bytes, 0.002 secs (124500 bytes/sec)
Nov 22 18:48:55 node3 named[98442]: zone 30.0.10.in-addr.arpa/IN: sending notifies (serial 3)
18、查看从节点配置文件信息
[root@node3 slaves]# ll
total 8
-rw-r--r--. 1 named named 375 Nov 22 18:48 node.conf
-rw-r--r--. 1 named named 433 Nov 22 18:48 node.txt
从节点文件已和主节点正常同步
19、客户端测试主节点dns服务器挂掉从节点能否正常使用
[root@node2 ~]# more /etc/resolv.conf
# Generated by NetworkManager
nameserver 10.0.30.95
nameserver 10.0.30.117
[root@node4 named]# systemctl stop named.service
[root@node4 named]# systemctl status named.service
● named.service - Berkeley Internet Name Domain (DNS)
   Loaded: loaded (/usr/lib/systemd/system/named.service; disabled; vendor preset: disabled)
   Active: inactive (dead)
Nov 22 18:48:55 node4 named[10366]: zone 30.0.10.in-addr.arpa/IN: sending notifies (serial 3)
Nov 22 18:48:55 node4 named[10366]: client 10.0.30.117#58148 (gfs.com): transfer of 'gfs.com/IN': AXFR-style IXFR started
Nov 22 18:48:55 node4 named[10366]: client 10.0.30.117#58148 (gfs.com): transfer of 'gfs.com/IN': AXFR-style IXFR ended
Nov 22 18:48:55 node4 named[10366]: client 10.0.30.117#28664: received notify for zone 'gfs.com'
Nov 22 18:48:55 node4 named[10366]: client 10.0.30.117#51530 (30.0.10.in-addr.arpa): transfer of '30.0.10.in-addr.arpa/IN': AXFR-style IXFR started
Nov 22 18:48:55 node4 named[10366]: client 10.0.30.117#51530 (30.0.10.in-addr.arpa): transfer of '30.0.10.in-addr.arpa/IN': AXFR-style IXFR ended
Nov 22 18:48:56 node4 named[10366]: client 10.0.30.117#41288: received notify for zone '30.0.10.in-addr.arpa'
Nov 22 18:59:26 node4 systemd[1]: Stopping Berkeley Internet Name Domain (DNS)...
Nov 22 18:59:26 node4 named[10366]: received control channel command 'stop'
Nov 22 18:59:26 node4 systemd[1]: Stopped Berkeley Internet Name Domain (DNS).
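主节点停掉后,可以在客户端用dig直接指定从节点地址验证解析是否仍然可用。下面是一个示意(node1.gfs.com为假设的记录名,请按实际区域文件中的记录替换):
# 正向解析测试,指定从节点10.0.30.117作为DNS服务器
dig @10.0.30.117 node1.gfs.com +short
# 反向解析测试
dig @10.0.30.117 -x 10.0.30.95 +short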
Data_free:已分配但未使用的字节数。对于InnoDB表,该值代表当前表所属表空间的可用空间:如果该表位于共享表空间,该值代表共享表空间的剩余空间;如果启用了独立表空间(每个表拥有自己的表空间),该值仅代表当前表所在表空间的剩余空间。剩余空间指当前完全空闲的区(extent)字节数减去一个安全边界值;即使该值为0,只要还能分配新的区,插入数据也是正常的。
对于NDB集群,该值显示已在磁盘上分配、但尚未被磁盘数据表或磁盘数据碎片使用的空间。
对于分区表,该值只是估算值,并不准确。要获得更准确的结果,可以使用如下查询:
SELECT SUM(DATA_FREE)
  FROM INFORMATION_SCHEMA.PARTITIONS
 WHERE TABLE_SCHEMA = 'mydb'
   AND TABLE_NAME   = 'mytable';
set global keyring_file_data='/data/mysql/keyfile/key01';
show global variables like '%keyring_file_data%';

root@db 16:28: [aaaa]> show global variables like '%keyring_file_data%';
+-------------------+---------------------------+
| Variable_name     | Value                     |
+-------------------+---------------------------+
| keyring_file_data | /data/mysql/keyfile/key01 |
+-------------------+---------------------------+
1 row in set (0.01 sec)
3.4、查看插件状态:
root@db 16:27: [aaaa]> SELECT PLUGIN_NAME, PLUGIN_STATUS
    -> FROM INFORMATION_SCHEMA.PLUGINS
    -> WHERE PLUGIN_NAME LIKE 'keyring%';
+--------------+---------------+
| PLUGIN_NAME  | PLUGIN_STATUS |
+--------------+---------------+
| keyring_file | ACTIVE        |
+--------------+---------------+
1 row in set (0.01 sec)
DELIMITER $$

DROP PROCEDURE IF EXISTS `aaaa`.`sp_status` $$
CREATE PROCEDURE `aaaa`.`sp_status`(dbname VARCHAR(50))
BEGIN
    -- Obtaining tables and views
    ( SELECT TABLE_NAME AS `Table Name`,
             ENGINE AS `Engine`,
             TABLE_ROWS AS `Rows`,
             CONCAT((FORMAT((DATA_LENGTH + INDEX_LENGTH) / POWER(1024,2),2)), ' Mb') AS `Size`,
             TABLE_COLLATION AS `Collation`
        FROM information_schema.TABLES
       WHERE TABLES.TABLE_SCHEMA = dbname
         AND TABLES.TABLE_TYPE = 'BASE TABLE' )
    UNION
    ( SELECT TABLE_NAME AS `Table Name`,
             '[VIEW]' AS `Engine`,
             '-' AS `Rows`,
             '-' `Size`,
             '-' AS `Collation`
        FROM information_schema.TABLES
       WHERE TABLES.TABLE_SCHEMA = dbname
         AND TABLES.TABLE_TYPE = 'VIEW' )
    ORDER BY 1;

    -- Obtaining functions, procedures and triggers
    ( SELECT ROUTINE_NAME AS `Routine Name`,
             ROUTINE_TYPE AS `Type`,
             '' AS `Comment`
        FROM information_schema.ROUTINES
       WHERE ROUTINE_SCHEMA = dbname
       ORDER BY ROUTINES.ROUTINE_TYPE, ROUTINES.ROUTINE_NAME )
    UNION
    ( SELECT TRIGGER_NAME,
             'TRIGGER' AS `Type`,
             CONCAT('On ', EVENT_MANIPULATION, ': ', EVENT_OBJECT_TABLE) AS `Comment`
        FROM information_schema.TRIGGERS
       WHERE EVENT_OBJECT_SCHEMA = dbname )
    ORDER BY 2,1;
END$$

DELIMITER ;
Generating a 1024 bit RSA private key
..................................++++++
.........++++++
writing new private key to '/etc/vsftpd/vsftpd.pem'
-----
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:cn
State or Province Name (full name) []:beijing
Locality Name (eg, city) [Default City]:beijing
Organization Name (eg, company) [Default Company Ltd]:goopal
Organizational Unit Name (eg, section) []:goopal
Common Name (eg, your name or your server's hostname) []:zeven
Email Address []:test@goopal.com
查看生成vsftpd.pem是否成功
ls -l /etc/vsftpd/ | grep vsftpd.pem
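证书生成后,vsftpd的配置文件中一般还需要开启SSL并指向该证书,SSL登录测试才能通过。下面是一个示意片段(各项取值以实际的/etc/vsftpd/vsftpd.conf为准):
# vsftpd.conf 中与SSL相关的示例配置
ssl_enable=YES
force_local_logins_ssl=YES
force_local_data_ssl=YES
ssl_tlsv1=YES
rsa_cert_file=/etc/vsftpd/vsftpd.pem
rsa_private_key_file=/etc/vsftpd/vsftpd.pem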
MySQL dax-mysql-master:3306 JS > cluster.dissolve()
The cluster still has active ReplicaSets.
Please use cluster.dissolve({force: true}) to deactivate replication and unregister the ReplicaSets from the cluster.
The following replicasets are currently registered:
{
    "clusterName": "prodCluster",
    "defaultReplicaSet": {
        "name": "default",
        "topology": [
            {
                "address": "dax-mysql-slave:3306",
                "label": "dax-mysql-slave:3306",
                "role": "HA"
            },
            {
                "address": "dax-mysql-master:3306",
                "label": "dax-mysql-master:3306",
                "role": "HA"
            }
        ]
    }
}

MySQL dax-mysql-master:3306 JS > cluster.dissolve({force: true})
WARNING: On instance 'dax-mysql-slave:3306' configuration cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please set the 'group_replication_start_on_boot' variable to 'OFF' in the server configuration file, otherwise it might rejoin the cluster upon restart.
WARNING: On instance 'dax-mysql-master:3306' configuration cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please set the 'group_replication_start_on_boot' variable to 'OFF' in the server configuration file, otherwise it might rejoin the cluster upon restart.

The cluster was successfully dissolved.
Replication was disabled but user data was left intact.
3、查看集群信息已经被删掉了:
root@db 16:54: [mysql_innodb_cluster_metadata]> select * from clusters;
Empty set (0.00 sec)
4、重新建立集群:
MySQL dax-mysql-master:3306 JS > var cluster = dba.createCluster('prodCluster');
A new InnoDB cluster will be created on instance 'repl@dax-mysql-master:3306'.
Validating instance at dax-mysql-master:3306...
This instance reports its own address as dax-mysql-master
WARNING: The following tables do not have a Primary Key or equivalent column: aaaa.test
Group Replication requires tables to use InnoDB and have a PRIMARY KEY or PRIMARY KEY Equivalent (non-null unique key). Tables that do not follow these requirements will be readable but not updateable when used with Group Replication. If your applications make updates (INSERT, UPDATE or DELETE) to these tables, ensure they use the InnoDB storage engine and have a PRIMARY KEY or PRIMARY KEY Equivalent.
Instance configuration is suitable.
Creating InnoDB cluster 'prodCluster' on 'repl@dax-mysql-master:3306'...
WARNING: On instance 'dax-mysql-master:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
Adding Seed Instance...
Cluster successfully created. Use Cluster.addInstance() to add MySQL instances.
At least 3 instances are needed for the cluster to be able to withstand up to one server failure.
MySQL dax-mysql-master:3306 JS > cluster.addInstance('dax-mysql-slave:3306')
A new instance will be added to the InnoDB cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Please provide the password for 'root@dax-mysql-slave:3306': ********
Adding instance to the cluster ...
Validating instance at dax-mysql-slave:3306...
This instance reports its own address as dax-mysql-slave
WARNING: The following tables do not have a Primary Key or equivalent column: aaaa.test
Group Replication requires tables to use InnoDB and have a PRIMARY KEY or PRIMARY KEY Equivalent (non-null unique key). Tables that do not follow these requirements will be readable but not updateable when used with Group Replication. If your applications make updates (INSERT, UPDATE or DELETE) to these tables, ensure they use the InnoDB storage engine and have a PRIMARY KEY or PRIMARY KEY Equivalent.
Instance configuration is suitable.
WARNING: On instance 'dax-mysql-slave:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
WARNING: On instance 'dax-mysql-master:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
The instance 'root@dax-mysql-slave:3306' was successfully added to the cluster.
root@db 17:14: [mysql_innodb_cluster_metadata]> show status like '%primary%';
+----------------------------------+--------------------------------------+
| Variable_name                    | Value                                |
+----------------------------------+--------------------------------------+
| group_replication_primary_member | d5bd8edd-9a1d-11e8-993e-525400578639 |
+----------------------------------+--------------------------------------+
1 row in set (0.01 sec)
mysql> stop group_replication;
mysql> set global group_replication_single_primary_mode=OFF;
mysql> set global group_replication_enforce_update_everywhere_checks=ON;
2、随便选择某个节点执行
mysql> SET GLOBAL group_replication_bootstrap_group=ON;
mysql> START GROUP_REPLICATION;
mysql> SET GLOBAL group_replication_bootstrap_group=OFF;
3、其他节点执行
mysql> START GROUP_REPLICATION;
查看节点启动报错:
2018-11-08T14:00:52.098660Z 56 [ERROR] Plugin group_replication reported: 'There was an error when connecting to the donor server. Please check that group_replication_recovery channel credentials and all MEMBER_HOST column values of performance_schema.replication_group_members table are correct and DNS resolvable.'
2018-11-08T14:00:52.098689Z 56 [ERROR] Plugin group_replication reported: 'For details please check performance_schema.replication_connection_status table and error log messages of Slave I/O for channel group_replication_recovery.'
手动配置复制信息:
change master to master_user='repl',master_password='12345678' for channel 'group_replication_recovery';
MySQL dax-mysql-master:3306 JS > cluster = dba.getCluster("prodCluster")
<Cluster:prodCluster>
MySQL dax-mysql-master:3306 JS > cluster.status();
Cluster.status: The InnoDB Cluster topology type (Single-Master) does not match the current Group Replication configuration (Multi-Master). Please use <cluster>.rescan() or change the Group Replication configuration accordingly. (RuntimeError)
MySQL dax-mysql-master:3306 JS > cluster.rescan()
Rescanning the cluster...
Result of the rescanning operation:
{
    "defaultReplicaSet": {
        "name": "default",
        "newlyDiscoveredInstances": [],
        "unavailableInstances": []
    }
}
再次查看集群状态仍然报错:
MySQL dax-mysql-master:3306 JS > cluster.status();
Cluster.status: The InnoDB Cluster topology type (Single-Master) does not match the current Group Replication configuration (Multi-Master). Please use <cluster>.rescan() or change the Group Replication configuration accordingly. (RuntimeError)
尝试删除集群元数据:
dba.dropMetadataSchema();
Are you sure you want to remove the Metadata? [y/N]: y
Metadata Schema successfully removed.
重新创建集群
var cluster = dba.createCluster('prodCluster', {adoptFromGR: true ,force: true});
报错提示:
[ERROR] Plugin group_replication reported: 'Table instances has a foreign key with 'CASCADE' clause. This is not compatible with Group Replication'
根据报错提示,先删除instances元数据表上带CASCADE的外键约束:
ALTER TABLE `mysql_innodb_cluster_metadata`.`instances` DROP FOREIGN KEY `instances_ibfk_1`;
ALTER TABLE `mysql_innodb_cluster_metadata`.`instances` DROP FOREIGN KEY `instances_ibfk_2`;
MySQL dax-mysql-master:3306 JS > var cluster = dba.createCluster('prodCluster', {adoptFromGR: true, force: true});
A new InnoDB cluster will be created based on the existing replication group on instance 'repl@dax-mysql-master:3306'.
mysql> stop group_replication;
mysql> set global group_replication_enforce_update_everywhere_checks=OFF;
mysql> set global group_replication_single_primary_mode=ON;
8、主节点(dax-mysql-master)执行
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;
show status like '%primary%';
+----------------------------------+--------------------------------------+
| Variable_name                    | Value                                |
+----------------------------------+--------------------------------------+
| group_replication_primary_member | d5bd8edd-9a1d-11e8-993e-525400578639 |
+----------------------------------+--------------------------------------+
1 row in set (0.01 sec)
MySQL dax-mysql-master:3306 JS > cluster = dba.getCluster("prodCluster")
<Cluster:prodCluster>
MySQL dax-mysql-master:3306 JS > cluster.status();
Cluster.status: The InnoDB Cluster topology type (Multi-Master) does not match the current Group Replication configuration (Single-Master). Please use <cluster>.rescan() or change the Group Replication configuration accordingly. (RuntimeError)
MySQL dax-mysql-master:3306 JS > cluster.rescan()
Rescanning the cluster...
Result of the rescanning operation:
{
    "defaultReplicaSet": {
        "name": "default",
        "newlyDiscoveredInstances": [],
        "unavailableInstances": []
    }
}
MySQL dax-mysql-master:3306 JS > cluster.status();
Cluster.status: The InnoDB Cluster topology type (Multi-Master) does not match the current Group Replication configuration (Single-Master). Please use <cluster>.rescan() or change the Group Replication configuration accordingly. (RuntimeError)
与单主切换为多主时的报错相同,尝试删除集群元数据:
dba.dropMetadataSchema();
Are you sure you want to remove the Metadata? [y/N]: y
Metadata Schema successfully removed.
重新创建集群:
MySQL dax-mysql-master:3306 JS > var cluster = dba.createCluster('prodCluster', {adoptFromGR: true});
A new InnoDB cluster will be created based on the existing replication group on instance 'repl@dax-mysql-master:3306'.
将从节点移除集群:
MySQL dax-mysql-master:3306 JS > cluster.removeInstance('dax-mysql-slave:3306')
The instance will be removed from the InnoDB cluster. Depending on the instance being the Seed or not, the Metadata session might become invalid. If so, please start a new session to the Metadata Storage R/W instance.
WARNING: On instance 'dax-mysql-master:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
WARNING: On instance 'dax-mysql-slave:3306' configuration cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please set the 'group_replication_start_on_boot' variable to 'OFF' in the server configuration file, otherwise it might rejoin the cluster upon restart.
The instance 'dax-mysql-slave:3306' was successfully removed from the cluster.
WARNING: The 'group_replication_start_on_boot' variable must be set to 'OFF' in the server configuration file, otherwise it might silently rejoin the cluster upon restart.
重新扫描集群信息:
MySQL dax-mysql-master:3306 JS > cluster.rescan();
Rescanning the cluster...
Result of the rescanning operation:
{
    "defaultReplicaSet": {
        "name": "default",
        "newlyDiscoveredInstances": [],
        "unavailableInstances": []
    }
}
MySQL dax-mysql-master:3306 JS > cluster.status();
{
    "clusterName": "prodCluster",
    "defaultReplicaSet": {
        "name": "default",
        "primary": "dax-mysql-master:3306",
        "ssl": "DISABLED",
        "status": "OK_NO_TOLERANCE",
        "statusText": "Cluster is NOT tolerant to any failures.",
        "topology": {
            "dax-mysql-master:3306": {
                "address": "dax-mysql-master:3306",
                "mode": "R/W",
                "readReplicas": {},
                "role": "HA",
                "status": "ONLINE"
            }
        }
    },
    "groupInformationSourceMember": "mysql://repl@dax-mysql-master:3306"
}
3、从节点修改server-id,并重启库,之后将从节点加入集群:
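在执行加入集群之前,从节点server-id的修改大致如下(配置文件路径与取值均为示意,需保证与主节点不同,修改后重启实例):
# /etc/my.cnf
[mysqld]
server-id = 1023306
# 修改完成后重启从库实例
systemctl restart mysqld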
mysql-shell > cluster.addInstance('dax-mysql-slave:3306')
A new instance will be added to the InnoDB cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Please provide the password for 'root@dax-mysql-slave:3306': ********
Adding instance to the cluster ...
Validating instance at dax-mysql-slave:3306...
This instance reports its own address as dax-mysql-slave
WARNING: The following tables do not have a Primary Key or equivalent column: aaaa.test
Group Replication requires tables to use InnoDB and have a PRIMARY KEY or PRIMARY KEY Equivalent (non-null unique key). Tables that do not follow these requirements will be readable but not updateable when used with Group Replication. If your applications make updates (INSERT, UPDATE or DELETE) to these tables, ensure they use the InnoDB storage engine and have a PRIMARY KEY or PRIMARY KEY Equivalent.
Instance configuration is suitable.
WARNING: On instance 'dax-mysql-slave:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
WARNING: On instance 'dax-mysql-master:3306' membership change cannot be persisted since MySQL version 5.7.22 does not support the SET PERSIST command (MySQL version >= 8.0.5 required). Please use the <Dba>.configureLocalInstance command locally to persist the changes.
The instance 'root@dax-mysql-slave:3306' was successfully added to the cluster.
select username,default_tablespace from dba_users where username='SCOTT';

USERNAME                       DEFAULT_TABLESPACE
------------------------------ ------------------------------
SCOTT                          USERS
查看users表空间初始化extent大小:
select TABLESPACE_NAME,BLOCK_SIZE,INITIAL_EXTENT,NEXT_EXTENT,MIN_EXTENTS,MAX_EXTENTS,EXTENT_MANAGEMENT,ALLOCATION_TYPE from user_tablespaces where tablespace_name='USERS';
NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_block_size                        integer     8192
创建测试表:
11:43:49 SCOTT@ boston> create table t1 as select * from emp where 0=1;
Table created.

表数据为空,查看是否已有extent存在:
11:43:54 SYS@ boston> select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';
no rows selected
向t1表内插入数据:
11:43:50 SCOTT@ boston> insert into t1 select * from emp;
14 rows created.
查看此时已分配出一个包含8个block的extent:
11:43:55 SYS@ boston> select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';
create tablespace test datafile '/data/u01/app/oracle/oradata/boston/test01.dbf' size 100M extent management local uniform size 2m;
查看表空间参数
15:28:39 SYS@ boston> select TABLESPACE_NAME,BLOCK_SIZE,INITIAL_EXTENT,NEXT_EXTENT,MIN_EXTENTS,MAX_EXTENTS,EXTENT_MANAGEMENT,ALLOCATION_TYPE from user_tablespaces where tablespace_name='TEST';
select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';

15:32:51 SYS@ boston> select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';
no rows selected
Elapsed: 00:00:00.04
插入数据
15:33:16 SCOTT@ boston> insert into t1 select * from emp;
14 rows created.
为t1表分配了一个2m大小的extent:
15:32:54 SYS@ boston> select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';

SEGMENT_NAME                      EXTENT_ID    FILE_ID   BLOCK_ID     BLOCKS
------------------------------- ---------- ---------- ---------- ----------
T1                                        0          5        128        256
批量插入数据
15:48:07 SYS@ boston> select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T1';
select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T2';
no rows selected
Elapsed: 00:00:00.11
插入数据:
insert into t2 select * from emp;
因为初始化设置为5m,而每个extent大小统一为2m,因此分配出三个extent:
select SEGMENT_NAME,EXTENT_ID,FILE_ID,BLOCK_ID,BLOCKS from dba_extents where OWNER='SCOTT' AND SEGMENT_NAME='T2';
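按前面的参数可以做一个简单的推算(示意计算,实际以查询输出为准):该表空间的extent统一为2MB,块大小为8KB,即每个extent包含 2MB / 8KB = 256 个block;初始存储设置为5MB,按2MB向上取整需要 3 个extent,共 3 x 256 = 768 个block。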