Friday, August 29, 2014

Set up a standard CentOS 7 QCOW2 cloud image to work with 2 VLANs on an IceHouse ML2&OVS&GRE System

Note that the same scheme works for Fedora 20 or Ubuntu QCOW2 cloud images as well: mount the image via qemu-nbd and add interface files for as many NICs as needed (2, 3, ...).
The approach suggested here is universal. Any cinder volume built from the updated glance image (2 NICs ready) will be 2 NICs ready as well.

*********************************************
Update qcow2 image for 2 NIC interfaces
*********************************************
[root@icehouse1 Downloads]# modprobe nbd max_part=63
[root@icehouse1 Downloads]# qemu-nbd -c /dev/nbd0 CentOS-7-x86_64-GenericCloud-20140826_02.qcow2
[root@icehouse1 Downloads]# mount /dev/nbd0p1 /mnt/image
[root@icehouse1 Downloads]# chroot /mnt/image
[root@icehouse1 /]# cd /etc/sysconfig/network-*
[root@icehouse1 network-scripts]# ls
ifcfg-eth0   ifdown-ipv6    ifdown-Team      ifup-eth    ifup-post      ifup-tunnel
ifcfg-lo     ifdown-isdn    ifdown-TeamPort  ifup-ippp   ifup-ppp       ifup-wireless
ifdown       ifdown-post    ifdown-tunnel    ifup-ipv6   ifup-routes    init.ipv6-global
ifdown-bnep  ifdown-ppp     ifup             ifup-isdn   ifup-sit       network-functions
ifdown-eth   ifdown-routes  ifup-aliases     ifup-plip   ifup-Team      network-functions-ipv6
ifdown-ippp  ifdown-sit     ifup-bnep        ifup-plusb  ifup-TeamPort
[root@icehouse1 network-scripts]# cp ifcfg-eth0 ifcfg-eth1
[root@icehouse1 network-scripts]# vi ifcfg-eth1
[root@icehouse1 network-scripts]# cat ifcfg-eth1
DEVICE="eth1"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
USERCTL="yes"
PEERDNS="yes"
IPV6INIT="no"
PERSISTENT_DHCLIENT="1"
[root@icehouse1 network-scripts]# exit
exit
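
Before uploading the image, unmount it and disconnect the NBD device (cleanup steps not shown in the session above):

[root@icehouse1 Downloads]# umount /mnt/image
[root@icehouse1 Downloads]# qemu-nbd -d /dev/nbd0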
****************************
Libguestfs tools
****************************

sudo yum install libguestfs-tools      # Fedora/RHEL/CentOS
sudo apt-get install libguestfs-tools  # Debian/Ubuntu


[boris@icehouse1 Downloads]$  guestfish --rw -a trusty-server-cloudimg-amd64-disk1.img

Welcome to guestfish, the guest filesystem shell for
editing virtual machine filesystems and disk images.

Type: 'help' for help on commands
      'man' to read the manual
      'quit' to quit the shell

> run
> list-filesystems
/dev/sda1: ext4
> mount /dev/sda1 /
> ls /etc/network/interfaces.d
eth0.cfg
> cp  /etc/network/interfaces.d/eth0.cfg /etc/network/interfaces.d/eth1.cfg
> edit /etc/network/interfaces.d/eth1.cfg
> ls  /etc/network/interfaces.d/
eth0.cfg
eth1.cfg
> cat /etc/network/interfaces.d/eth1.cfg
# The primary network interface
auto eth1
iface eth1 inet dhcp

> cat /etc/network/interfaces.d/eth0.cfg
# The primary network interface
auto eth0
iface eth0 inet dhcp


[boris@icehouse1 Downloads]$  guestfish --rw -a  Fedora-x86_64-20-20140407-sda.qcow2

Welcome to guestfish, the guest filesystem shell for
editing virtual machine filesystems and disk images.

Type: 'help' for help on commands
      'man' to read the manual
      'quit' to quit the shell

> run
 100% ⟦▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒⟧ --:--
> list-filesystems
/dev/sda1: ext4
> mount /dev/sda1 /
> cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth1
> edit  /etc/sysconfig/network-scripts/ifcfg-eth1
> cat  /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE="eth1"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"

> cat  /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE="eth0"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"

> exit
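
The same edits can be scripted instead of typed interactively by feeding guestfish a here-document. A minimal sketch, assuming the single-partition layout shown above; it writes ifcfg-eth1 directly rather than copying and editing eth0's file:

guestfish --rw -a Fedora-x86_64-20-20140407-sda.qcow2 <<'EOF'
run
mount /dev/sda1 /
write /etc/sysconfig/network-scripts/ifcfg-eth1 "DEVICE=\"eth1\"\nBOOTPROTO=\"dhcp\"\nONBOOT=\"yes\"\nTYPE=\"Ethernet\"\n"
EOF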


*********************************************************
Upload image to glance and launch VM to this image
*********************************************************
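
The actual upload and launch commands are not captured in the transcripts below; roughly, with the IceHouse CLI clients they look like this (image name and flavor are placeholders, and the net-ids are taken from the `neutron net-list` output further down):

[root@icehouse1 ~(keystone_admin)]# glance image-create --name "CentOS7-two-nic" \
      --disk-format qcow2 --container-format bare --is-public True \
      --file CentOS-7-x86_64-GenericCloud-20140826_02.qcow2

[root@icehouse1 ~(keystone_admin)]# nova boot --flavor m1.small --image CentOS7-two-nic \
      --nic net-id=140d25a4-0d98-4424-a35a-2a985b2f0a17 \
      --nic net-id=8b22b262-c9c1-4138-8092-0581195f0889 \
      centos07twonic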


[root@icehouse1 Downloads]# ssh centos@192.168.1.204
The authenticity of host '192.168.1.204 (192.168.1.204)' can't be established.
ECDSA key fingerprint is 46:54:d3:46:e3:d1:e0:a8:57:af:a8:22:f6:3a:ed:ea.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.1.204' (ECDSA) to the list of known hosts.
centos@192.168.1.204's password:
Last login: Sat Aug 30 06:00:10 2014
[centos@centos07twonic ~]$ sudo su
[root@centos07twonic centos]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1454
        inet 10.0.0.19  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::f816:3eff:fe9a:59f8  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:9a:59:f8  txqueuelen 1000  (Ethernet)
        RX packets 255  bytes 32133 (31.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 314  bytes 33467 (32.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1454
        inet 40.0.0.67  netmask 255.255.255.0  broadcast 40.0.0.255
        inet6 fe80::f816:3eff:fe6c:3c8d  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:6c:3c:8d  txqueuelen 1000  (Ethernet)
        RX packets 27  bytes 2762 (2.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 31  bytes 4869 (4.7 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 12  bytes 976 (976.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 976 (976.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


[root@icehouse1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+---------+-----------------------------------------------------+
| id                                   | name    | subnets                                             |
+--------------------------------------+---------+-----------------------------------------------------+
| 8b22b262-c9c1-4138-8092-0581195f0889 | private | 06530f5a-31af-4a14-a40f-808ee2e9e3ad 40.0.0.0/24    |
| 140d25a4-0d98-4424-a35a-2a985b2f0a17 | demonet | f2e318f8-05c6-4dda-8e8e-07f7a8f2c91a 10.0.0.0/24    |
| 295a5bba-c219-407f-830d-911cd2214349 | public  | c8421c61-7d85-4cf8-a5c8-03c05982bff9 192.168.1.0/24 |
+--------------------------------------+---------+-----------------------------------------------------+
 

[root@icehouse1 ~(keystone_admin)]# ip netns
qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b
qrouter-4135e351-9ae4-4e89-9b23-7b131b2c4e6c
qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17
qdhcp-8b22b262-c9c1-4138-8092-0581195f0889
 

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889 ssh centos@40.0.0.67
 

The authenticity of host '40.0.0.67 (40.0.0.67)' can't be established.
ECDSA key fingerprint is 46:54:d3:46:e3:d1:e0:a8:57:af:a8:22:f6:3a:ed:ea.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '40.0.0.67' (ECDSA) to the list of known hosts.
centos@40.0.0.67's password:
Last login: Sat Aug 30 15:20:36 2014 from 10.0.0.11
 

[centos@centos07twonic ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
10.0.0.19


[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh   centos@10.0.0.19
centos@10.0.0.19's password:
Last login: Sat Aug 30 15:19:04 2014 from 10.0.0.11
[centos@centos07twonic ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
10.0.0.19






   The same procedure, applied to the Fedora 20 cloud image :-
  


[root@icehouse1 Downloads(keystone_admin)]# modprobe nbd max_part=63
[root@icehouse1 Downloads(keystone_admin)]# qemu-nbd -c /dev/nbd0 Fedora-x86_64-20-20140407-sda.qcow2
[root@icehouse1 Downloads(keystone_admin)]# mount /dev/nbd0p1 /mnt/image
[root@icehouse1 Downloads(keystone_admin)]# chroot /mnt/image

[root@icehouse1 /(keystone_admin)]# ls -l
total 76
lrwxrwxrwx.  1 root root     7 Apr  8 02:28 bin -> usr/bin
dr-xr-xr-x.  4 root root  4096 Apr  8 02:29 boot
drwxr-xr-x.  4 root root  4096 Apr  8 02:29 dev
drwxr-xr-x. 63 root root  4096 Apr  8 02:30 etc
drwxr-xr-x.  2 root root  4096 Aug  7  2013 home
lrwxrwxrwx.  1 root root     7 Apr  8 02:28 lib -> usr/lib
lrwxrwxrwx.  1 root root     9 Apr  8 02:28 lib64 -> usr/lib64
drwx------.  2 root root 16384 Apr  8 02:27 lost+found
drwxr-xr-x.  2 root root  4096 Aug  7  2013 media
drwxr-xr-x.  2 root root  4096 Aug  7  2013 mnt
drwxr-xr-x.  2 root root  4096 Aug  7  2013 opt
drwxrwxr-x.  2 root root  4096 Apr  8 02:27 proc
dr-xr-x---.  2 root root  4096 Apr  8 02:29 root
drwxr-xr-x.  8 root root  4096 Apr  8 02:29 run
lrwxrwxrwx.  1 root root     8 Apr  8 02:28 sbin -> usr/sbin
drwxr-xr-x.  2 root root  4096 Aug  7  2013 srv
drwxrwxr-x.  2 root root  4096 Apr  8 02:27 sys
drwxrwxrwt.  2 root root  4096 Aug 30 07:39 tmp
drwxr-xr-x. 12 root root  4096 Apr  8 02:28 usr
drwxr-xr-x. 18 root root  4096 Apr  8 02:28 var

[root@icehouse1 /(keystone_admin)]# cd /etc/sysconfig/network-*
[root@icehouse1 network-scripts(keystone_admin)]# ls
ifcfg-eth0   ifdown-ipv6    ifdown-tunnel  ifup-ipv6   ifup-ppp          network-functions
ifcfg-lo     ifdown-isdn    ifup           ifup-ipx    ifup-routes       network-functions-ipv6
ifdown       ifdown-post    ifup-aliases   ifup-isdn   ifup-sit
ifdown-bnep  ifdown-ppp     ifup-bnep      ifup-plip   ifup-tunnel
ifdown-eth   ifdown-routes  ifup-eth       ifup-plusb  ifup-wireless
ifdown-ippp  ifdown-sit     ifup-ippp      ifup-post   init.ipv6-global
[root@icehouse1 network-scripts(keystone_admin)]# cp ifcfg-eth0 ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth0
[root@icehouse1 network-scripts(keystone_admin)]# vi  ifcfg-eth1
[root@icehouse1 network-scripts(keystone_admin)]# exit

[root@icehouse1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+---------+-----------------------------------------------------+
| id                                   | name    | subnets                                             |
+--------------------------------------+---------+-----------------------------------------------------+
| 8b22b262-c9c1-4138-8092-0581195f0889 | private | 06530f5a-31af-4a14-a40f-808ee2e9e3ad 40.0.0.0/24    |
| 140d25a4-0d98-4424-a35a-2a985b2f0a17 | demonet | f2e318f8-05c6-4dda-8e8e-07f7a8f2c91a 10.0.0.0/24    |
| 295a5bba-c219-407f-830d-911cd2214349 | public  | c8421c61-7d85-4cf8-a5c8-03c05982bff9 192.168.1.0/24 |
+--------------------------------------+---------+-----------------------------------------------------+

[root@icehouse1 ~(keystone_admin)]# ip netns
qrouter-ecf9ee4e-b92c-4a5b-a884-d753a184764b
qrouter-4135e351-9ae4-4e89-9b23-7b131b2c4e6c
qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17
qdhcp-8b22b262-c9c1-4138-8092-0581195f0889

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889 ssh -i oskey45.pem fedora@40.0.0.71
Last login: Sat Aug 30 12:34:35 2014 from 40.0.0.11
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.71
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/public-ipv4
192.168.1.205
[fedora@vf20twonicrxc ~]$ exit
logout
Connection to 40.0.0.71 closed.

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh -i oskey45.pem fedora@10.0.0.23
Last login: Sat Aug 30 12:35:11 2014 from 40.0.0.11

[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.71
[fedora@vf20twonicrxc ~]$ curl http://169.254.169.254/latest/meta-data/public-ipv4
192.168.1.205
[fedora@vf20twonicrxc ~]$ exit
logout
Connection to 10.0.0.23 closed.

*************************************
Ubuntu 14.04 guest snapshots
*************************************

 
  
   [root@icehouse1 Downloads]# ssh -i oskey45.pem ubuntu@192.168.1.203
   Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)

 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 14:34:00 UTC 2014

  System load:  0.01              Processes:           74
  Usage of /:   17.0% of 6.86GB   Users logged in:     1
  Memory usage: 4%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


  Last login: Sat Aug 30 14:34:00 2014

[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-140d25a4-0d98-4424-a35a-2a985b2f0a17 ssh  -i oskey45.pem ubuntu@10.0.0.24
Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)


 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 15:24:45 UTC 2014

  System load:  0.21              Processes:           76
  Usage of /:   19.3% of 6.86GB   Users logged in:     0
  Memory usage: 2%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


Last login: Sat Aug 30 15:24:44 2014


ubuntu@ubuntutwonicrsq:~$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.72
logout
Connection to 10.0.0.24 closed.


[root@icehouse1 ~(keystone_admin)]# ip netns exec qdhcp-8b22b262-c9c1-4138-8092-0581195f0889  ssh  -i oskey45.pem ubuntu@40.0.0.72

Welcome to Ubuntu 14.04.1 LTS (GNU/Linux 3.13.0-35-generic x86_64)
 * Documentation:  https://help.ubuntu.com/

  System information as of Sat Aug 30 16:24:45 UTC 2014

  System load:  0.0               Processes:           88
  Usage of /:   19.7% of 6.86GB   Users logged in:     1
  Memory usage: 6%                IP address for eth0: 10.0.0.24
  Swap usage:   0%                IP address for eth1: 40.0.0.72

  Graph this data and manage this system at:
    https://landscape.canonical.com/

  Get cloud support with Ubuntu Advantage Cloud Guest:
    http://www.ubuntu.com/business/services/cloud


Last login: Sat Aug 30 16:24:46 2014 from 10.0.0.11
ubuntu@ubuntutwonicrsq:~$ curl http://169.254.169.254/latest/meta-data/local-ipv4
40.0.0.72


  

 
  Assigning a floating IP to an instance with two NICs :-


  
Only one of the two available ports will accept a properly working floating IP assignment.
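
A sketch of picking the right port explicitly with the neutron CLI (the floating IP is created on the external "public" network and associated with one of the instance's ports; the ids in angle brackets are placeholders):

[root@icehouse1 ~(keystone_admin)]# nova list | grep centos07twonic
[root@icehouse1 ~(keystone_admin)]# neutron port-list -- --device_id <instance-id>
[root@icehouse1 ~(keystone_admin)]# neutron floatingip-create public
[root@icehouse1 ~(keystone_admin)]# neutron floatingip-associate <floatingip-id> <port-id>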

Thursday, August 28, 2014

Set up a CentOS 7 cloud instance on an IceHouse Neutron ML2&OVS&GRE System

   A CentOS 7.0 qcow2 image for glance is now available at
http://openstack.redhat.com/Image_resources
   Regardless of dhcp-option 26,1454 being set on the system, the current image comes up with MTU 1500. The workaround for now is to launch the instance with no ssh keypair and use the following post-installation script:

#cloud-config
password: mysecret
chpasswd: { expire: False }
ssh_pwauth: True
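
Saved to a file (say cloud-init.txt), the snippet above is passed as user data at launch time; a minimal sketch, with flavor, image and instance names as placeholders:

# nova boot --flavor m1.small --image CentOS7-image \
      --nic net-id=<demonet-id> --user-data ./cloud-init.txt CentOS7RS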

Detecting the new default cloud user name :-

modprobe nbd max_part=63
qemu-nbd -c /dev/nbd0 image.qcow2
mkdir -p /mnt/image
mount /dev/nbd0p1 /mnt/image
cat /mnt/image/etc/cloud/cloud.cfg | tail -20
 - ssh-authkey-fingerprints
 - keys-to-console
 - phone-home
 - final-message

system_info:
  default_user:
    name: centos   <= new default name
    lock_passwd: true
    gecos: Cloud User
    groups: [wheel, adm, systemd-journal]
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    shell: /bin/bash
  distro: rhel
  paths:
    cloud_dir: /var/lib/cloud
    templates_dir: /etc/cloud/templates
  ssh_svcname: sshd

Then log in to the VNC console with the given password and run

# ifconfig eth0 mtu 1454 up

The setting stays stable between reboots.
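
If the MTU ever needs to be pinned explicitly, it can also be set in the interface file; this alternative relies on standard initscripts behaviour and is not part of the original workaround:

# echo 'MTU="1454"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
# systemctl restart network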
 



 Set up Gnome Desktop on the VM

# yum -y groupinstall "GNOME Desktop"
$ echo "exec /usr/bin/gnome-session" >> ~/.xinitrc
# ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target

 

Thursday, August 14, 2014

Set up Gluster 3.5.2 on a Two-Node Controller&Compute Neutron ML2&VXLAN&OVS CentOS 7 Cluster

    This post is an update of the previous one - RDO Setup Two Real Node (Controller+Compute) IceHouse Neutron ML2&OVS&VXLAN Cluster on CentOS 7 (http://bderzhavets.blogspot.com/2014/07/rdo-setup-two-real-node_29.html). It focuses on the Gluster 3.5.2 implementation, including tuning the /etc/sysconfig/iptables files on the CentOS 7 Controller and Compute nodes.
    It covers copying the ssh key from the master node to the compute node, step-by-step verification of gluster volume "replica 2" functionality, and switching the RDO IceHouse cinder services to a gluster volume that stores the instances' bootable cinder volumes, for better performance. Of course, creating gluster bricks under "/" is not recommended; the bricks should live on a separate "xfs" mount point on each node.


- Controller node: Nova, Keystone, Cinder, Glance, Neutron (using Open vSwitch plugin and VXLAN tunneling)
- Compute node: Nova (nova-compute), Neutron (openvswitch-agent)

icehouse1.localdomain   -  Controller (192.168.1.127)
icehouse2.localdomain   -  Compute   (192.168.1.137)

Download from http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.2/EPEL.repo/epel-7/SRPMS/
glusterfs-3.5.2-1.el7.src.rpm

$ rpm -iv glusterfs-3.5.2-1.el7.src.rpm

$ sudo yum install bison flex gcc automake libtool ncurses-devel readline-devel libxml2-devel openssl-devel libaio-devel lvm2-devel glib2-devel libattr-devel libibverbs-devel librdmacm-devel fuse-devel

$ rpmbuild -bb glusterfs.spec
. . . . . . . . . . . . . . . . . . . . . . .

Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/noarch/glusterfs-resource-agents-3.5.2-1.el7.centos.noarch.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-regression-tests-3.5.2-1.el7.centos.x86_64.rpm
Wrote: /home/boris/rpmbuild/RPMS/x86_64/glusterfs-debuginfo-3.5.2-1.el7.centos.x86_64.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.Sigc7l
+ umask 022
+ cd /home/boris/rpmbuild/BUILD
+ cd glusterfs-3.5.2
+ rm -rf /home/boris/rpmbuild/BUILDROOT/glusterfs-3.5.2-1.el7.centos.x86_64
+ exit 0

[boris@icehouse1 x86_64]$ cat install
sudo yum install glusterfs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-api-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-cli-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-devel-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-extra-xlators-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-fuse-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-geo-replication-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-libs-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-rdma-3.5.2-1.el7.centos.x86_64.rpm \
glusterfs-server-3.5.2-1.el7.centos.x86_64.rpm

$ sudo service glusterd start

1. The first step is tuning /etc/sysconfig/iptables for the IPv4 iptables firewall (the firewalld service should be disabled) :-

Update /etc/sysconfig/iptables on both nodes:-

-A INPUT -p tcp -m multiport --dport 24007:24047 -j ACCEPT
-A INPUT -p tcp --dport 111 -j ACCEPT
-A INPUT -p udp --dport 111 -j ACCEPT
-A INPUT -p tcp -m multiport --dport 38465:38485 -j ACCEPT

Comment out the lines below, ignoring the notice that manual customization of the file is not recommended:

# -A FORWARD -j REJECT --reject-with icmp-host-prohibited
# -A INPUT -j REJECT --reject-with icmp-host-prohibited

 Restart the iptables service on both nodes:
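
For example, assuming the stock iptables service from the iptables-services package (firewalld stays disabled):

# service iptables restart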

2. Second step:-


On icehouse1, run the following commands :

# ssh-keygen (Hit Enter to accept all of the defaults)
# ssh-copy-id -i ~/.ssh/id_rsa.pub  root@icehouse2

On both nodes run :-

# ./install
# service glusterd start

On icehouse1

# gluster peer probe icehouse2.localdomain

Should return "success"

[root@icehouse1 ~(keystone_admin)]# gluster peer status
Number of Peers: 1

Hostname: icehouse2.localdomain
Uuid: 3ca6490b-c44a-4601-ac13-51fec99e9caf
State: Peer in Cluster (Connected)

[root@icehouse1 ~(keystone_admin)]# gluster volume info

Volume Name: cinder-volumes09
Type: Replicate
Volume ID: 83b645a0-532e-46df-93e2-ed1f95f081cd
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: icehouse1.localdomain:/GLSD/Volumes
Brick2: icehouse2.localdomain:/GLSD/Volumes
Options Reconfigured:
auth.allow: 192.168.1.*

[root@icehouse1 ~(keystone_admin)]# gluster volume status
Status of volume: cinder-volumes09
Gluster process                                        Port    Online  Pid
------------------------------------------------------------------------------
Brick icehouse1.localdomain:/GLSD/Volumes              49152   Y       5453
Brick icehouse2.localdomain:/GLSD/Volumes              49152   Y       3009
NFS Server on localhost                                2049    Y       5458
Self-heal Daemon on localhost                          N/A     Y       5462
NFS Server on icehouse2.localdomain                    2049    Y       3965
Self-heal Daemon on icehouse2.localdomain              N/A     Y       3964

Task Status of Volume cinder-volumes09
------------------------------------------------------------------------------
There are no active volume tasks


[root@icehouse1 ~(keystone_admin)]# ssh 192.168.1.137
Last login: Thu Aug 14 17:53:41 2014
[root@icehouse2 ~]# gluster peer status
Number of Peers: 1

Hostname: 192.168.1.127
Uuid: 051e7528-8c2b-46e1-abb6-6d84b2f2e45b
State: Peer in Cluster (Connected)


*************************************************************************
On Controller (192.168.1.127) and on Compute (192.168.1.137)
*************************************************************************

Verify ports availability:-

[root@icehouse1 ~(keystone_admin)]# netstat -lntp | grep gluster
tcp        0      0 0.0.0.0:49152           0.0.0.0:*               LISTEN      5453/glusterfsd  
tcp        0      0 0.0.0.0:2049             0.0.0.0:*               LISTEN      5458/glusterfs    
tcp        0      0 0.0.0.0:38465           0.0.0.0:*               LISTEN      5458/glusterfs    
tcp        0      0 0.0.0.0:38466           0.0.0.0:*               LISTEN      5458/glusterfs    
tcp        0      0 0.0.0.0:38468           0.0.0.0:*               LISTEN      5458/glusterfs    
tcp        0      0 0.0.0.0:38469           0.0.0.0:*               LISTEN      5458/glusterfs    
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      2667/glusterd    
tcp        0      0 0.0.0.0:978               0.0.0.0:*               LISTEN      5458/glusterfs

************************************
Switching Cinder to Gluster volume
************************************

# gluster volume create cinder-volumes09  replica 2 icehouse1.localdomain:/GLSD/Volumes   icehouse2.localdomain:/GLSD/Volumes  force

# gluster volume start cinder-volumes09

# gluster volume set cinder-volumes09  auth.allow 192.168.1.*


# openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver

# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_shares_config /etc/cinder/shares.conf

# openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_mount_point_base /var/lib/cinder/volumes

# vi /etc/cinder/shares.conf
    192.168.1.127:/cinder-volumes09

:wq

The following configuration changes are necessary for 'qemu' and the 'samba vfs plugin' integration with libgfapi to work seamlessly:

1. Allow insecure ports on the volume:

     gluster volume set cinder-volumes09 server.allow-insecure on

2. Restart the volume for the change to take effect:

     gluster volume stop cinder-volumes09
     gluster volume start cinder-volumes09

3. Edit /etc/glusterfs/glusterd.vol to contain the line:

     option rpc-auth-allow-insecure on

4. Restart glusterd:

     service glusterd restart
   

/etc/nova/nova.conf (on the Compute Node) should have the entry :-

qemu_allowed_storage_drivers = gluster
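
For consistency with the cinder.conf changes above, the same can be done with openstack-config followed by a nova-compute restart (a sketch; the service name assumes standard RDO packaging):

# openstack-config --set /etc/nova/nova.conf DEFAULT qemu_allowed_storage_drivers gluster
# service openstack-nova-compute restart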



Make sure all thin LVM volumes have been deleted (check with `cinder list`); if any remain, delete them all.

[root@icehouse1 ~(keystone_admin)]$ for i in api scheduler volume ; do service openstack-cinder-${i} restart ; done

 It should add a row to the `df -h` output :

[root@icehouse1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root        147G   15G  132G  10% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G   13M  3.9G   1% /dev/shm
tmpfs                            3.9G   18M  3.9G   1% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sdb6                        477M  191M  257M  43% /boot
192.168.1.127:/cinder-volumes09  147G   18G  130G  12% /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
tmpfs                            3.9G   18M  3.9G   1% /run/netns



[root@icehouse1 ~(keystone_admin)]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/centos01-root        147G   17G  131G  12% /
devtmpfs                         3.9G     0  3.9G   0% /dev
tmpfs                            3.9G   19M  3.9G   1% /dev/shm
tmpfs                            3.9G   42M  3.8G   2% /run
tmpfs                            3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sdb6                        477M  191M  257M  43% /boot
192.168.1.127:/cinder-volumes09  147G   18G  129G  13% /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
tmpfs                            3.9G   42M  3.8G   2% /run/netns

[root@icehouse1 ~(keystone_admin)]# ls -l /var/lib/cinder/volumes/5c5ae2460f1962d6f046ca5859584996
total 5739092
-rw-rw-rw-. 1 root root 5368709120 Aug 14 21:58 volume-2f20aefb-b1ab-4b3f-bb23-10a1cbe9b946
-rw-rw-rw-. 1 root root 5368709120 Aug 14 22:06 volume-d8b0d31c-6f3a-44a1-86a4-bc4575697c29

[root@icehouse1 ~(keystone_admin)]# cinder list --all-tenants
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |  Display Name | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+
| 2f20aefb-b1ab-4b3f-bb23-10a1cbe9b946 | in-use | UbuntuLVG0814 |  5   |     None    |   true   | ead0fe1b-923a-4a12-978c-ad33b9ea245c |
| d8b0d31c-6f3a-44a1-86a4-bc4575697c29 | in-use |  VF20VLG0814  |  5   |     None    |   true   | 7343807e-5bd1-4c7f-8b4a-e5efb1ce8c2e |
+--------------------------------------+--------+---------------+------+-------------+----------+--------------------------------------+