mount.sh
# Assuming /dev/sdb is your new empty disk
# Assuming 10.0.0.* is your private network
# Install and configure GlusterFS. (Run on all nodes)
apt-get install glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd
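# Optional sanity check (not part of the original steps): confirm the daemon is
# running before continuing. Both commands are standard GlusterFS/systemd calls.
systemctl status glusterd --no-pager
gluster --version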
# Format the disk and mount it (Run on all nodes)
mkfs.xfs /dev/sdb
echo '/dev/sdb /var/no-direct-write-here/gluster-bricks xfs defaults 0 0' >> /etc/fstab
mkdir -p /var/no-direct-write-here/gluster-bricks
mount /var/no-direct-write-here/gluster-bricks
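# Optional check: the brick filesystem should now be mounted on every node.
df -h /var/no-direct-write-here/gluster-bricks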
# Add the peers (Run on node1)
gluster peer probe node2
gluster peer probe node3
gluster peer status
gluster pool list
# Create the volume (Run on node1)
gluster volume create swarm-vol replica 3 \
  node1:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node2:/var/no-direct-write-here/gluster-bricks/swarm-vol \
  node3:/var/no-direct-write-here/gluster-bricks/swarm-vol
gluster volume set swarm-vol auth.allow 10.0.0.*
gluster volume start swarm-vol
gluster volume status
gluster volume info swarm-vol
# Mount the volume (Run on all nodes)
echo 'localhost:/swarm-vol /swarm-vol glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
mkdir -p /swarm-vol
mount /swarm-vol
chown -R root:docker /swarm-vol
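# Optional smoke test (the file name is arbitrary): write a file on one node,
# then confirm it appears under /swarm-vol on the other nodes.
touch /swarm-vol/replication-test
ls -l /swarm-vol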
# Hack to fix gluster not mounting on boot (Run on all nodes)
echo -e "\n\nsleep 10s && mount -a" >> /etc/rc.local