# Format the disk and create the corresponding mount directory
mkfs.xfs -i size=512 /dev/sdb1
mkdir -p /gfs/test1
# Write the mount entry into /etc/fstab so the brick is remounted automatically after a reboot
vi /etc/fstab
/dev/sdb1 /gfs/test1 xfs defaults 1 2
# Apply the new fstab entry
mount -a && mount
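A quick sanity check, not part of the original steps, to confirm the brick filesystem is mounted with the expected inode size:

# Verify the mount and the XFS inode size
df -hT /gfs/test1        # should list /dev/sdb1 with type xfs
xfs_info /gfs/test1      # the meta-data line should show isize=512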
Note: on CentOS 6, the XFS utilities must be installed first:
yum install xfsprogs
3. Install and configure the glusterd service
yum install glusterfs-server
Start the GlusterFS management daemon:
# Enable glusterd at boot
systemctl enable glusterd
ln -s '/usr/lib/systemd/system/glusterd.service' '/etc/systemd/system/multi-user.target.wants/glusterd.service'
# Start glusterd
systemctl start glusterd
# Check the glusterd status
systemctl status glusterd
● glusterd.service - GlusterFS, a clustered file-system server
   Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2018-11-15 12:08:54 EST; 15s ago
  Process: 2808 ExecStart=/usr/sbin/glusterd -p /var/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2810 (glusterd)
    Tasks: 8
   CGroup: /system.slice/glusterd.service
           └─2810 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
Nov 15 12:08:53 node1 systemd[1]: Starting GlusterFS, a clustered file-system server...
Nov 15 12:08:54 node1 systemd[1]: Started GlusterFS, a clustered file-system server.
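glusterd listens on TCP port 24007 for management traffic, which the peer probe below depends on; a quick check that the port is open:

# Confirm glusterd is listening on its management port
ss -tln | grep 24007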
On node1, probe node2 to add it to the trusted pool; here the probe fails because node2 cannot be reached:
[root@node1 ~]# gluster peer probe node2
peer probe: failed: Probe returned with Transport endpoint is not connected
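A common cause of this "Transport endpoint is not connected" failure is a firewall blocking GlusterFS traffic. A minimal sketch for CentOS 7 with firewalld, assuming the default brick port range; run it on every node:

# GlusterFS management ports
firewall-cmd --permanent --add-port=24007-24008/tcp
# Brick ports (one per brick, allocated from 49152 upward)
firewall-cmd --permanent --add-port=49152-49251/tcp
firewall-cmd --reload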
On node2, add node1 to the trusted pool:
[root@node2 ~]# gluster peer probe node1
peer probe: success. Host node1 port 24007 already in peer list
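The trusted pool can then be verified from either node:

gluster peer status
# The other node should appear with State: Peer in Cluster (Connected)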
6. Create a GlusterFS volume
Run on both node1 and node2:
mkdir /gfs/test1/gv0
Run the following on any one node; it does not need to be repeated on the others:
# gluster volume create gv0 replica 2 node1:/gfs/test1/gv0 node2:/gfs/test1/gv0
[root@node1 ~]# gluster volume create gv0 replica 2 node1:/gfs/test1/gv0 node2:/gfs/test1/gv0
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
 (y/n) y
volume create: gv0: success: please start the volume to access data
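As the output says, the volume must be started before data can be accessed (the info output below already shows Status: Started):

gluster volume start gv0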
# gluster volume info
[root@node1 ~]# gluster volume info
Volume Name: gv0
Type: Replicate
Volume ID: 79c81f10-0cb8-4f26-a7ab-d21fe19f0bbf
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv0
Brick2: node2:/gfs/test1/gv0
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
If the volume does not start properly, check the logs under /var/log/glusterfs.
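For example, the management daemon's log can be followed while starting the volume:

# glusterd writes its log to /var/log/glusterfs/glusterd.log by default
tail -f /var/log/glusterfs/glusterd.log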
7. Verify that replication works on volume gv0
# Mount the volume on any empty directory
mount -t glusterfs node1:/gv0 /mnt
# Generate some test data
for i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
# Count the files that were created
ls /mnt | wc -l
# All 100 files appear under /gfs/test1/gv0 on both node1 and node2
ls /gfs/test1/gv0 | wc -l
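To go one step further than counting files, the checksum of the same file can be compared on both bricks; identical values confirm the replicas match (run on both node1 and node2):

# seq -w 1 100 pads to three digits, so the first file is copy-test-001
md5sum /gfs/test1/gv0/copy-test-001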
Creating a dispersed volume with only two bricks fails:
[root@node1 ~]# gluster volume create gv1 disperse 2 node1:/gfs/test1/gv1 node2:/gfs/test1/gv1
disperse count must be greater than 2
disperse option given twice
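A dispersed volume therefore needs at least three bricks. A sketch of a working layout, assuming a third node with the same brick path is available:

# 2 data bricks + 1 redundancy brick; the volume survives the loss of any one brick
gluster volume create gv1 disperse 3 redundancy 1 node1:/gfs/test1/gv1 node2:/gfs/test1/gv1 node3:/gfs/test1/gv1
gluster volume start gv1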
Creating a distributed replicated volume:
# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4 node4:/gfs/test1/gv4
[root@node1 test1]# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
 (y/n) y
number of bricks is not a multiple of replica count
# The brick count must be a multiple of the replica count
[root@node1 test1]# gluster volume create gv4 replica 2 transport tcp node1:/gfs/test1/gv4 node2:/gfs/test1/gv4 node3:/gfs/test1/gv4 node4:/gfs/test1/gv4
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
 (y/n) y
volume create: gv4: success: please start the volume to access data
[root@node1 test1]# gluster volume start gv4
[root@node1 test1]# gluster volume info gv4
Volume Name: gv4
Type: Distributed-Replicate
Volume ID: e8556b2e-462d-4407-99c4-a6e622754e6c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv4
Brick2: node2:/gfs/test1/gv4
Brick3: node3:/gfs/test1/gv4
Brick4: node4:/gfs/test1/gv4
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
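To see the distribution at work, the volume can be mounted and populated; each file lands on exactly one replica pair (a sketch, assuming the client can resolve node1):

mkdir -p /mnt/gv4
mount -t glusterfs node1:/gv4 /mnt/gv4
for i in `seq -w 1 10`; do touch /mnt/gv4/file-$i; done
# Run on each node: the counts on the node1/node2 pair and the node3/node4 pair sum to 10
ls /gfs/test1/gv4 | wc -l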
Creating an arbiter volume:
[root@node1 test1]# gluster volume create gv5 replica 2 arbiter 2 transport tcp node1:/gfs/test1/gv5 node2:/gfs/test1/gv5 node3:/gfs/test1/gv5
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
 (y/n) y
For arbiter configuration, replica count must be 3 and arbiter count must be 1. The 3rd brick of the replica will be the arbiter
# An arbiter configuration requires replica 3 with arbiter 1
[root@node1 test1]# gluster volume create gv5 replica 3 arbiter 1 transport tcp node1:/gfs/test1/gv5 node2:/gfs/test1/gv5 node3:/gfs/test1/gv5
# Start gv5
[root@node1 test1]# gluster volume start gv5
[root@node1 test1]# gluster volume info gv5
Volume Name: gv5
Type: Replicate
Volume ID: fd4fca20-1bb3-480b-9c24-703dd3e8b508
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (2 + 1) = 3
Transport-type: tcp
Bricks:
Brick1: node1:/gfs/test1/gv5
Brick2: node2:/gfs/test1/gv5
Brick3: node3:/gfs/test1/gv5 (arbiter)
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
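For replicated and arbiter volumes, pending self-heal entries can be inspected at any time:

# List entries waiting to be healed on each brick of gv5
gluster volume heal gv5 info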
# Show information for all volumes:
gluster volume info all
# Show volume status:
gluster volume status [all | <VOLNAME> [<BRICK>]] [detail|clients|mem|inode|fd|callpool]
# Show the status of all volumes:
gluster volume status all
# Show extra details for a volume:
gluster volume status gv5 detail
# List a volume's clients:
gluster volume status gv5 clients
# Show memory usage:
gluster volume status gv5 mem
# Show the volume's inode table:
gluster volume status gv5 inode
# Show the volume's open fd table:
gluster volume status gv5 fd
# Show the volume's pending calls:
gluster volume status gv5 callpool
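For scripting, most gluster info and status commands also accept --xml, which emits machine-readable output instead of the table form:

gluster volume status all --xml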