Wednesday, July 22, 2015

Setup NovaDocker Container via supertaihei02/docker-centos-lamp:lamp with RDO Kilo on CentOS 7.1

[root@ServerCentOS71 ~(keystone_admin)]# docker pull "supertaihei02/docker-centos-lamp:lamp"
Trying to pull repository docker.io/supertaihei02/docker-centos-lamp ...
fd8dbcf7c7a8: Download complete 
511136ea3c5a: Download complete 
34e94e67e63a: Download complete 
0c752394b855: Download complete 
313bc652d65e: Download complete 
7bca8e6c58d0: Download complete 
02c7dd4b7b17: Download complete 
1ccb205c0045: Download complete 
67c95f15098c: Download complete 
265c1884886d: Download complete 
397ee04ea830: Download complete 
d57e43613b5c: Download complete 
2614d5f8ae73: Download complete 
bcd82bfaa75d: Download complete 
2513dc5a7634: Download complete 
99c99df8dd45: Download complete 
2c7af5e056c6: Download complete 
474020a06892: Download complete 
72ca693b56ec: Download complete 
32538e0f2a30: Download complete 
67d8a91c730d: Download complete 
a3f485a396f4: Download complete 
ad78608b9dec: Download complete 
08fbdbd00577: Download complete 
1fa3dcbd7265: Download complete 
789146ed1568: Download complete 
297a56c9f440: Download complete 
1b1962c27933: Download complete 
de86771fcddf: Download complete 
2fc57b1bb564: Download complete 
b9979c770dbd: Download complete 
dfa087672ba1: Download complete 
108d43aa66f4: Download complete 
4573e6861576: Download complete 
Status: Downloaded newer image for docker.io/supertaihei02/docker-centos-lamp:lamp
 
[root@ServerCentOS71 ~(keystone_admin)]# docker save supertaihei02/docker-centos-lamp:lamp | \
glance image-create --is-public=True   --container-format=docker \
--disk-format=raw --name supertaihei02/docker-centos-lamp:lamp
 
+------------------+---------------------------------------+
| Property         | Value                                 |
+------------------+---------------------------------------+
| checksum         | 24169bb5bf7b8e218af565b9105d231f      |
| container_format | docker                                |
| created_at       | 2015-07-22T13:15:49.000000            |
| deleted          | False                                 |
| deleted_at       | None                                  |
| disk_format      | raw                                   |
| id               | b317fed0-4618-406b-925f-954eb2aa35f7  |
| is_public        | True                                  |
| min_disk         | 0                                     |
| min_ram          | 0                                     |
| name             | supertaihei02/docker-centos-lamp:lamp |
| owner            | 616a4800e9a541b78e4aba836314883d      |
| protected        | False                                 |
| size             | 899017728                             |
| status           | active                                |
| updated_at       | 2015-07-22T13:17:13.000000            |
| virtual_size     | None                                  |
+------------------+---------------------------------------+
Next
[root@ServerCentOS71 ~(keystone_admin)]# docker images |grep lamp
docker.io/supertaihei02/docker-centos-lamp   lamp                fd8dbcf7c7a8      13 months ago       883.8 MB
 
Nova-Docker container started

[root@ServerCentOS71 ~(keystone_admin)]# docker ps
 
CONTAINER ID        IMAGE                                   COMMAND               CREATED             STATUS              PORTS               NAMES
6a1e0a782a7d        supertaihei02/docker-centos-lamp:lamp   "/usr/bin/monit -I"   4 minutes ago       Up 4 minutes                            nova-8ade872c-45dc-40b0-be38-a5ab61d7de0f 
 
[root@ServerCentOS71 ~(keystone_admin)]# docker logs 6a1e0a782a7d
monit: generated unique Monit id 78e9d47e7325f30d9ba76b908278921b and stored to '/var/monit/
 
    
 
  
 
[root@ServerCentOS71 ~]# ssh -p 22 -l guest  192.168.1.165
guest@192.168.1.165's password: 
Last login: Thu Jul 23 04:39:50 2015 from 192.168.1.87
[guest@instance-00000010 ~]$ cat /etc/issue
CentOS release 6.5 (Final)
Kernel \r on an \m

[guest@instance-00000010 ~]$ sudo su -
[root@instance-00000010 ~]# ps -ef

UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 01:22 ?        00:00:00 /usr/bin/monit -I
root        18     1  0 01:22 ?        00:00:00 /usr/sbin/sshd
root       300     1  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     302   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     303   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     304   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     305   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     306   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     307   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     308   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
apache     309   300  0 01:22 ?        00:00:00 /usr/sbin/httpd
root      1079     1  0 02:08 ?        00:00:00 /bin/sh /usr/bin/mysqld_safe --datadir=/var/lib/mysql --socket=/var/lib/mysql/mysql.sock --pid-file=/var/run/mysqld/mysqld.pid --basedir=/usr --user=mysql
mysql     1286  1079  0 02:08 ?        00:00:03 /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib64/mysql/plugin --user=mysql --log-error=/var/log/mysqld.log --pid-file=/var/run/mysqld/mysqld.pid --socket=/var/lib/mysql/mysql.sock
boris     4270     1  0 04:25 ?        00:00:01 /usr/bin/Xvnc :1 -desktop instance-00000010:1 (boris) -auth /home/boris/.Xauthority -geometry 1024x768 -rfbwait 30000 -rfbauth /home/boris/.vnc/passwd -rfbport 5901 -fp catalogue:/etc/X11/fontpath.d -pn
boris     4275     1  0 04:25 ?        00:00:00 /usr/bin/gnome-session
boris     4276  4275  0 04:25 ?        00:00:00 vncconfig -iconic
boris     4284     1  0 04:25 ?        00:00:00 dbus-launch --sh-syntax --exit-with-session
boris     4285     1  0 04:25 ?        00:00:00 /bin/dbus-daemon --fork --print-pid 4 --print-address 6 --session
boris     4296     1  0 04:25 ?        00:00:00 /usr/libexec/gconfd-2
boris     4301     1  0 04:25 ?        00:00:00 gnome-keyring-daemon --start
boris     4304     1  0 04:25 ?        00:00:00 /usr/libexec/gnome-settings-daemon
boris     4309     1  0 04:25 ?        00:00:00 /usr/libexec/gvfsd
boris     4313  4275  0 04:25 ?        00:00:00 metacity
boris     4319  4275  0 04:25 ?        00:00:00 gnome-panel
boris     4321  4275  0 04:25 ?        00:00:00 nautilus
boris     4323     1  0 04:25 ?        00:00:00 /usr/libexec/bonobo-activation-server --ac-activate --ior-output-fd=18
boris     4345     1  0 04:25 ?        00:00:00 /usr/libexec/wnck-applet --oaf-activate-iid=OAFIID:GNOME_Wncklet_Factory --oaf-ior-fd=18
boris     4348     1  0 04:25 ?        00:00:00 /usr/libexec/trashapplet --oaf-activate-iid=OAFIID:GNOME_Panel_TrashApplet_Factory --oaf-ior-fd=24
boris     4356  4275  0 04:25 ?        00:00:00 gnome-volume-control-applet
boris     4360     1  0 04:25 ?        00:00:00 gnome-screensaver
boris     4366     1  0 04:25 ?        00:00:00 /usr/libexec/gvfsd-trash --spawner :1.7 /org/gtk/gvfs/exec_spaw/0
boris     4383     1  0 04:25 ?        00:00:00 /usr/libexec/notification-area-applet --oaf-activate-iid=OAFIID:GNOME_NotificationAreaApplet_Factory --oaf-ior-fd=28
boris     4384     1  0 04:25 ?        00:00:00 /usr/libexec/clock-applet --oaf-activate-iid=OAFIID:GNOME_ClockApplet_Factory --oaf-ior-fd=34
boris     4470     1  0 04:25 ?        00:00:05 /usr/lib64/firefox/firefox
root      6879    18  0 04:39 ?        00:00:00 sshd: guest [priv]
guest     6895  6879  0 04:39 ?        00:00:00 sshd: guest@pts/0
guest     6896  6895  0 04:39 pts/0    00:00:00 -bash
root      6925  6896  0 04:39 pts/0    00:00:00 sudo su -
root      6926  6925  0 04:39 pts/0    00:00:00 su -
root      6927  6926  0 04:39 pts/0    00:00:00 -bash
root      7082  6927  3 04:40 pts/0    00:00:00 ps -ef

Sunday, July 19, 2015

Setup Nova-Docker Driver with RDO Kilo on F22 via master branch https://github.com/stackforge/nova-docker


This post describes in detail a sequence of steps that make it possible to work
with the Nova-Docker driver built from the top commit of the master nova-docker
branch; the stable/kilo branch is not supposed to be checked out before
running `python setup.py install`. This results in several additional efforts.
The first one targets the ability to restart the nova-compute service after switching to the Nova-Docker driver; the second one is related to tuning the configuration files of the glance-api and glance-registry services so that they can restart successfully after adding "docker" to "container_formats" in glance-api.conf.

Follow RDO Kilo setup on Fedora 22 as suggested in http://lxer.com/module/newswire/view/216855/index.html 
When done proceed with build NovaDocker driver via master branch
http://github.com/stackforge/nova-docker.git  .

Because https://review.openstack.org/#/c/188339/
( see https://bugs.launchpad.net/nova/+bug/1461217 )
is not packaged with RDO Kilo on Fedora, the workaround below is required to be
able to restart openstack-nova-compute after switching to the new driver.
The same result may be obtained following https://bugs.launchpad.net/nova/+bug/1461217
via a manual update of /usr/lib/python2.7/site-packages/nova/compute/hv_type.py and recompiling hv_type.py.


**********************************
Setting up NovaDocker driver
**********************************

# dnf -y install git docker-io python-six  fedora-repos-rawhide
# dnf --enablerepo=rawhide install  python-pip python-pbr

 **********************
 Next
 **********************
 # git clone http://github.com/stackforge/nova-docker.git
 # cd nova-docker
 # git branch -v -a
* master                         8568e60 Updated from global requirements
  remotes/origin/HEAD            -> origin/master
  remotes/origin/master          8568e60 Updated from global requirements
  remotes/origin/stable/icehouse 9045ca4 Fix lockpath for tests
  remotes/origin/stable/juno     b724e65 Fix tests on stable/juno
  remotes/origin/stable/kilo     d556444 Do not enable swift/ceilometer/sahara
 # python setup.py install
 # systemctl start docker
 # systemctl enable docker
 # chmod 666  /var/run/docker.sock
 # mkdir /etc/nova/rootwrap.d

Now update  /usr/lib/python2.7/site-packages/novadocker/virt/docker/driver.py
line 326 . Actually, this hack comes from version driver.py generated via stable
branch stable/kilo of nova-docker.git :-

'supported_instances': jsonutils.dumps([
            ('i686', 'docker', 'lxc'),
            ('x86_64', 'docker', 'lxc')
        ])

# python -m py_compile driver.py

******************************
Update nova.conf
******************************
vi /etc/nova/nova.conf
set "compute_driver = novadocker.virt.docker.DockerDriver"

**********************************
Add to folder /etc/glance files
**********************************
1. glance-api-paste.ini from
http://docs.openstack.org/kilo/config-reference/content/section_glance-api-paste.conf.html
2. glance-registry-paste.ini from
http://docs.openstack.org/kilo/config-reference/content/section_glance-registry-paste.conf.html


************************************
Update glance-api.conf
************************************
container_formats=ami,ari,aki,bare,ovf,ova,docker

Add to section [glance-store] at the bottom of glance-api.conf

filesystem_store_datadir = /var/lib/glance/images

************************************************
Next, create the docker.filters file:
************************************************
$ vi /etc/nova/rootwrap.d/docker.filters

Insert Lines

# nova-rootwrap command filters for setting up network in the docker driver
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root

***************************
Restart Services
***************************
# systemctl restart openstack-nova-compute
# systemctl status openstack-nova-compute
# systemctl restart openstack-glance-api
# systemctl restart openstack-glance-registry


Testing Ubuntu Vivid docker image

1. Follow  https://github.com/tutumcloud/tutum-ubuntu 
2. Upload image to glance and launch Nova Docker container

[root@serverFedora22 ~(keystone_admin)]#  docker save tutum/ubuntu:vivid  | \
glance image-create --is-public=True   --container-format=docker \
--disk-format=raw --name tutum/ubuntu:vivid



Launch novadocker instance via dashboard ( in my case floating IP 192.168.1.160 ) password is provided via `docker logs container-id`

[root@serverFedora22 ~(keystone_admin)]# ssh -p 22 root@192.168.1.160
root@192.168.1.160's password:
Last login: Sun Jul 19 14:19:12 2015 from 192.168.1.85
root@instance-00000008:~# echo "nameserver 83.221.202.254" > /etc/resolv.conf



Next test Wordpress NovaDocker container

#  docker pull eugeneware/docker-wordpress-nginx
# . keystonerc_admin
#  docker save eugeneware/docker-wordpress-nginx:latest  | glance image-create --is-public true --container-format docker --disk-format raw --name eugeneware/docker-wordpress-nginx:latest



   Wordpress Server is available from the office LAN via floating IP 192.168.1.161

 


Friday, July 10, 2015

Setup Nova-Docker Driver with RDO Kilo on Fedora 22

The hackery below was tested multiple times for AIO installs via packstack,
providing completely functional Neutron services, and allows creating neutron
routers, tenants' and external networks on a single box or virtual machine.
View for instance https://www.rdoproject.org/forum/categories/blog/
However, I have gotten some negative results attempting to perform a multinode
deployment separating the Controller and Network Nodes, regardless of successful packstack completion. In the case of a Two Node ( Controller&&Network + Compute ) packstack deployment the hackery still works.

An update which makes an RDO Kilo Three Node deployment on Fedora 22 possible ( as of the time of writing, 07/12/2015 ) may be seen here
 
************************************************
First set up RDO Kilo (AIO) on Fedora 22
************************************************
# dnf install -y https://rdoproject.org/repos/rdo-release.rpm
# dnf  install -y openstack-packstack  
# dnf install fedora-repos-rawhide

# dnf  --enablerepo=rawhide update openstack-packstack
Fedora - Rawhide - Developmental packages for the next Fedora re 4.0 MB/s |  43 MB     00:10   
Last metadata expiration check performed 0:00:41 ago on Sat Jul 11 13:58:50 2015.
Dependencies resolved.
=================================================================================================
 Package                       Arch      Version                                Repository  Size
=================================================================================================
Upgrading:
 openstack-packstack           noarch    2015.1-0.8.dev1589.g1d6372f.fc23       rawhide    235 k
 openstack-packstack-puppet    noarch    2015.1-0.8.dev1589.g1d6372f.fc23       rawhide     23 k

Transaction Summary
=================================================================================================
Upgrade  2 Packages

Total download size: 258 k
Is this ok [y/N]: y
Downloading Packages:
(1/2): openstack-packstack-puppet-2015.1-0.8.dev1589.g1d6372f.fc 175 kB/s |  23 kB     00:00   
(2/2): openstack-packstack-2015.1-0.8.dev1589.g1d6372f.fc23.noar 891 kB/s | 235 kB     00:00   
-------------------------------------------------------------------------------------------------
Total                                                            128 kB/s | 258 kB     00:02    
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
  Upgrading   : openstack-packstack-puppet-2015.1-0.8.dev1589.g1d6372f.fc23.noarch           1/4
  Upgrading   : openstack-packstack-2015.1-0.8.dev1589.g1d6372f.fc23.noarch                  2/4
  Cleanup     : openstack-packstack-2015.1-0.2.dev1537.gba5183c.fc23.noarch                  3/4
  Cleanup     : openstack-packstack-puppet-2015.1-0.2.dev1537.gba5183c.fc23.noarch           4/4
  Verifying   : openstack-packstack-2015.1-0.8.dev1589.g1d6372f.fc23.noarch                  1/4
  Verifying   : openstack-packstack-puppet-2015.1-0.8.dev1589.g1d6372f.fc23.noarch           2/4
  Verifying   : openstack-packstack-2015.1-0.2.dev1537.gba5183c.fc23.noarch                  3/4
  Verifying   : openstack-packstack-puppet-2015.1-0.2.dev1537.gba5183c.fc23.noarch           4/4
Upgraded:
  openstack-packstack.noarch 2015.1-0.8.dev1589.g1d6372f.fc23                                   
  openstack-packstack-puppet.noarch 2015.1-0.8.dev1589.g1d6372f.fc23                            

Complete!

# dnf install python3-pyOpenSSL.noarch python-service-identity.noarch python-ndg_httpsclient.noarch

**********************
At this point run :-
**********************
# packstack  --gen-answer-file answer-file-aio.txt
and set
CONFIG_KEYSTONE_SERVICE_NAME=httpd

************************************************************************
I also commented out second line in  /etc/httpd/conf.d/mod_dnssd.conf
************************************************************************

# cd   /usr/lib/python2.7/site-packages/packstack/puppet/templates
and apply third patch from link . It will disable provision_demo.pp

Then run
# packstack --answer-file=./answer-file-aio.txt

***********************************************
Upon completion you are supposed to get :-
***********************************************
[root@WorkstationF22 ~]# rpm -qa  \*puppet\*

openstack-puppet-modules-2015.1.6-2.fc23.noarch
puppet-4.1.0-1.fc22.noarch
openstack-packstack-puppet-2015.1-0.8.dev1589.g1d6372f.fc23.noarch

******************************************
Configure OVS_BRIDGE and OVS_PORT  
******************************************
In particular case, external neutron network 192.168.1.0/24 match LAN
office network, IP address of physical router is 192.168.1.1

[root@ServerFedora22 network-scripts(keystone_admin)]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.168.1.32"
NETMASK="255.255.255.0"
DNS1="8.8.8.8"
BROADCAST="192.168.1.255"
GATEWAY="192.168.1.1"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ServerFedora22 network-scripts(keystone_admin)]# cat ifcfg-enp2s0
DEVICE="enp2s0"
ONBOOT="yes"
HWADDR="90:E6:BA:2D:11:EB"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no
*****************************************
When configuration above is done :-
*****************************************
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# reboot
**********************************
Setting up NovaDocker driver
**********************************

# dnf -y install git docker-io python-six  fedora-repos-rawhide
# dnf --enablerepo=rawhide install  python-pip python-pbr
# reboot
 **********************
 Next
 **********************
 # chmod 666 /var/run/docker.sock
 # git clone http://github.com/stackforge/nova-docker.git
 # cd nova-docker
 # git checkout -b kilo origin/stable/kilo
 # git branch -v -a
 * kilo                           d556444 Do not enable swift/ceilometer/sahara
  master                         d556444 Do not enable swift/ceilometer/sahara
  remotes/origin/HEAD            -> origin/master
  remotes/origin/master          d556444 Do not enable swift/ceilometer/sahara
  remotes/origin/stable/icehouse 9045ca4 Fix lockpath for tests
  remotes/origin/stable/juno     b724e65 Fix tests on stable/juno
  remotes/origin/stable/kilo     d556444 Do not enable swift/ceilometer/sahara

 # python setup.py install
 # systemctl start docker
 # systemctl enable docker
 # chmod 666  /var/run/docker.sock
 # mkdir /etc/nova/rootwrap.d

******************************
Update nova.conf
******************************
vi /etc/nova/nova.conf
set "compute_driver = novadocker.virt.docker.DockerDriver"

************************************************
Next, create the docker.filters file:
************************************************
$ vi /etc/nova/rootwrap.d/docker.filters

Insert Lines

# nova-rootwrap command filters for setting up network in the docker driver
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/virt/docker/driver.py: 'ln', '-sf', '/var/run/netns/.*'
ln: CommandFilter, /bin/ln, root

*****************************************
Add line /etc/glance/glance-api.conf
*****************************************
container_formats=ami,ari,aki,bare,ovf,ova,docker

Restart Services
************************
# systemctl restart openstack-nova-compute
# systemctl status openstack-nova-compute
# systemctl restart openstack-glance-api

Testing Ubuntu Vivid docker image

1. Follow  https://github.com/tutumcloud/tutum-ubuntu 
2. Upload image to glance and launch Nova Docker container

[root@WorkstationF22 ~(keystone_admin)]#  docker save tutum/ubuntu:vivid  | \
glance image-create --is-public=True   --container-format=docker \
--disk-format=raw --name tutum/ubuntu:vivid

+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | a2abfd90ee54f14bb44e8cfdd7ae3159     |
| container_format | docker                               |
| created_at       | 2015-07-10T14:47:13.000000           |
| deleted          | False                                |
| deleted_at       | None                                 |
| disk_format      | raw                                  |
| id               | 0a5c442a-8067-4db8-8689-04b76412e001 |
| is_public        | True                                 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | tutum/ubuntu:vivid                   |
| owner            | 18b9889bd61140e58b7bf90904d2abcc     |
| protected        | False                                |
| size             | 220456960                            |
| status           | active                               |
| updated_at       | 2015-07-10T14:47:41.000000           |
| virtual_size     | None                                 |
+------------------+--------------------------------------+



  Implement security rules in same way as for Libvirt driver and launch instance via dashboard :-


  


[root@WorkstationF22 ~(keystone_admin)]# docker ps
CONTAINER ID        IMAGE                          COMMAND               CREATED             STATUS              PORTS               NAMES
ec0f6df23bd2        tutum/ubuntu:vivid             "/run.sh"             49 seconds ago      Up 47 seconds                           nova-e543f496-db14-4d89-a5fb-56ae91f2946b  
d4b6ea309bdc        rastasheep/ubuntu-sshd:14.04   "/usr/sbin/sshd -D"   28 minutes ago      Up 28 minutes                           nova-527a7e22-707a-4d00-9b17-5ccfbc15a9aa

  
[root@WorkstationF22 ~(keystone_admin)]# docker logs ec0f6df23bd2
=> Setting a random password to the root user
=> Done!
==================================================

You can now connect to this Ubuntu container via SSH using:

    ssh -p <port> root@<host>
and enter the root password 'MO0NBGvlYlu8' when prompted

Please remember to change the above password as soon as possible!
==================================================


[root@WorkstationF22 ~(keystone_admin)]# ssh -p 22 root@192.168.1.153
The authenticity of host '192.168.1.153 (192.168.1.153)' can't be established.
ECDSA key fingerprint is SHA256:0+T/egdd9DE3tx0AmUO71qkWVo3PbWC0+vOS+lAt2AY.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.1.153' (ECDSA) to the list of known hosts.
root@192.168.1.153's password:
root@instance-00000003:~# cat /etc/issue
Ubuntu 15.04 \n \l

root@instance-00000003:~# ps -ef
UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 14:55 ?        00:00:00 /usr/sbin/sshd -D
root        12     1  0 14:57 ?        00:00:00 sshd: root@pts/0   
root        13    12  0 14:57 pts/0    00:00:00 -bash
root        20    13  0 14:57 pts/0    00:00:00 ps -ef
root@instance-00000003:~#
 

**************************
Update /etc/rc.d/rc.local
**************************
chmod 666  /var/run/docker.sock
service openstack-nova-compute restart

Saturday, June 06, 2015

Switching to Dashboard Spice Console in RDO Kilo on Fedora 22

*************************
UPDATE 06/27/2015
*************************
# dnf install -y https://rdoproject.org/repos/rdo-release.rpm
# dnf  install -y openstack-packstack  
# dnf install fedora-repos-rawhide
# dnf  --enablerepo=rawhide update openstack-packstack

Fedora - Rawhide - Developmental packages for the next Fedora re 1.7 MB/s |  45 MB     00:27   
Last metadata expiration check performed 0:00:39 ago on Sat Jun 27 13:23:03 2015.
Dependencies resolved.
==============================================================
 Package                       Arch      Version                                Repository  Size
==============================================================
Upgrading:
 openstack-packstack           noarch    2015.1-0.7.dev1577.gc9f8c3c.fc23       rawhide    233 k
 openstack-packstack-puppet    noarch    2015.1-0.7.dev1577.gc9f8c3c.fc23       rawhide     23 k

Transaction Summary
==============================================================
Upgrade  2 Packages
 .  .  .  .  .

# dnf install python3-pyOpenSSL.noarch python-service-identity.noarch python-ndg_httpsclient.noarch

At this point run :-

# packstack  --gen-answer-file answer-file-aio.txt

and set

CONFIG_KEYSTONE_SERVICE_NAME=httpd

I also commented out second line in  /etc/httpd/conf.d/mod_dnssd.conf
Then run `packstack --answer-file=./answer-file-aio.txt` ; however, you will still
need to pre-patch provision_demo.pp at the moment
( see the third patch at http://textuploader.com/yn0v ) ; the rest should work fine.

Upon completion you may try follow :-
https://www.rdoproject.org/Neutron_with_existing_external_network
I didn't test it on Fedora 22; I just created external and private networks of VXLAN type and configured the interfaces below:
 
[root@ServerFedora22 network-scripts(keystone_admin)]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.168.1.32"
NETMASK="255.255.255.0"
DNS1="8.8.8.8"
BROADCAST="192.168.1.255"
GATEWAY="192.168.1.1"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ServerFedora22 network-scripts(keystone_admin)]# cat ifcfg-enp2s0
DEVICE="enp2s0"
ONBOOT="yes"
HWADDR="90:E6:BA:2D:11:EB"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

When configuration above is done :-

# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# reboot

*************************
UPDATE 06/26/2015
*************************
To install RDO Kilo on Fedora 22 :-
after `dnf -y install openstack-packstack `
# cd /usr/lib/python2.7/site-packages/packstack/puppet/templates
Then apply following 3 patches   
# cd ; packstack  --gen-answer-file answer-file-aio.txt
Set "CONFIG_NAGIOS_INSTALL=n" in  answer-file-aio.txt
# packstack --answer-file=./answer-file-aio.txt
************************
UPDATE 05/19/2015
************************
MATE Desktop supports sound ( via the patch mentioned below ) on RDO Kilo cloud instances F22, F21, F20. The RDO Kilo AIO install was performed on bare metal.
Also Windows Server 2012 (evaluation version) cloud VM provides pretty stable "video/sound" ( http://www.cloudbase.it/windows-cloud-images/ ) .
************************
UPDATE 05/14/2015
************************ 
I've got sound working on a CentOS 7 VM ( connecting to the console via virt-manager ) with a slightly updated patch from Y.Kawada, self.type set to "ich6". RDO Kilo was installed on a bare metal AIO testing host running Fedora 22. The same results have been obtained for RDO Kilo on CentOS 7.1. However, a connection to the spice console with the cut & paste and sound features enabled may be obtained via spicy ( remote connection ).
Generated libvirt.xml
<domain type="kvm">
  <uuid>455877f2-7070-48a7-bb24-e0702be2fbc5</uuid>
  <name>instance-00000003</name>
  <memory>2097152</memory>
  <vcpu cpuset="0-7">1</vcpu>
  <metadata>
    <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
      <nova:package version="2015.1.0-3.el7"/>
      <nova:name>CentOS7RSX05</nova:name>
      <nova:creationTime>2015-06-14 18:42:11</nova:creationTime>
      <nova:flavor name="m1.small">
        <nova:memory>2048</nova:memory>
        <nova:disk>20</nova:disk>
        <nova:swap>0</nova:swap>
        <nova:ephemeral>0</nova:ephemeral>
        <nova:vcpus>1</nova:vcpus>
      </nova:flavor>
      <nova:owner>
        <nova:user uuid="da79d2c66db747eab942bdbe20bb3f44">demo</nova:user>
        <nova:project uuid="8c9defac20a74633af4bb4773e45f11e">demo</nova:project>
      </nova:owner>
      <nova:root type="image" uuid="4a2d708c-7624-439f-9e7e-6e133062e23a"/>
    </nova:instance>
  </metadata>
  <sysinfo type="smbios">
    <system>
      <entry name="manufacturer">Fedora Project</entry>
      <entry name="product">OpenStack Nova</entry>
      <entry name="version">2015.1.0-3.el7</entry>
      <entry name="serial">b3fae7c3-10bd-455b-88b7-95e586342203</entry>
      <entry name="uuid">455877f2-7070-48a7-bb24-e0702be2fbc5</entry>
    </system>
  </sysinfo>
  <os>
    <type>hvm</type>
    <boot dev="hd"/>
    <smbios mode="sysinfo"/>
  </os>
  <features>
    <acpi/>
    <apic/>
  </features>
  <cputune>
    <shares>1024</shares>
  </cputune>
  <clock offset="utc">
    <timer name="pit" tickpolicy="delay"/>
    <timer name="rtc" tickpolicy="catchup"/>
    <timer name="hpet" present="no"/>
  </clock>
  <cpu mode="host-model" match="exact">
    <topology sockets="1" cores="1" threads="1"/>
  </cpu>
  <devices>
    <disk type="file" device="disk">
      <driver name="qemu" type="qcow2" cache="none"/>
      <source file="/var/lib/nova/instances/455877f2-7070-48a7-bb24-e0702be2fbc5/disk"/>
      <target bus="virtio" dev="vda"/>
    </disk>
    <interface type="bridge">
      <mac address="fa:16:3e:87:4b:29"/>
      <model type="virtio"/>
      <source bridge="qbr8ce9ae7b-f0"/>
      <target dev="tap8ce9ae7b-f0"/>
    </interface>
    <serial type="file">
      <source path="/var/lib/nova/instances/455877f2-7070-48a7-bb24-e0702be2fbc5/console.log"/>
    </serial>
    <serial type="pty"/>
    <channel type="spicevmc">
      <target type="virtio" name="com.redhat.spice.0"/>
    </channel>
    <graphics type="spice" autoport="yes" keymap="en-us" listen="0.0.0.0"/>
    <video>
      <model type="qxl"/>
    </video>
    <sound model="ich6"/>
    <memballoon model="virtio">
      <stats period="10"/>
    </memballoon>
  </devices>
</domain>

  

*****************
END UPDATE
*****************

The post follows up http://lxer.com/module/newswire/view/214893/index.html
The most recent `yum update` on F22 significantly improved network performance on cloud VMs (L2). Watching movies running on a cloud F22 VM (with "Mate Desktop" installed and functioning pretty smoothly) without sound refreshes spice memories; view https://bugzilla.redhat.com/show_bug.cgi?format=multiple&id=913607


# dnf -y install spice-html5 ( installed on Controller && Compute)
# dnf -y install  openstack-nova-spicehtml5proxy (Compute Node)
# rpm -qa | grep openstack-nova-spicehtml5proxy
openstack-nova-spicehtml5proxy-2015.1.0-3.fc23.noarch

*********************************************************************** 
Update /etc/nova/nova.conf on Controller && Compute Node as follows :-
***********************************************************************

[DEFAULT]

. . . . .
web=/usr/share/spice-html5 
. . . . . .
spicehtml5proxy_host=0.0.0.0  (only Compute)
spicehtml5proxy_port=6082     (only Compute)
. . . . . . .
# Disable VNC
vnc_enabled=false
. . . . . . .
[spice]
# Compute Node Management IP 192.169.142.137

html5proxy_base_url=http://192.169.142.137:6082/spice_auto.html
server_proxyclient_address=127.0.0.1 ( only  Compute )
server_listen=0.0.0.0 ( only  Compute )
enabled=true
agent_enabled=true
keymap=en-us

:wq


# service httpd restart ( on Controller )

Next actions to be performed on Compute Node

# service openstack-nova-compute restart
# service openstack-nova-spicehtml5proxy start
# systemctl enable openstack-nova-spicehtml5proxy

  

On Controller

[root@ip-192-169-142-127 ~(keystone_admin)]# nova list --all-tenants

+--------------------------------------+-----------+----------------------------------+---------+------------+-------------+----------------------------------+
| ID                                   | Name      | Tenant ID                        | Status  | Task State | Power State | Networks                         |
+--------------------------------------+-----------+----------------------------------+---------+------------+-------------+----------------------------------+
| 6c8ef008-e8e0-4f1c-af17-b5f846f8b2d9 | CirrOSDev | 7e5a0f3ec3fe45dc83ae0947ef52adc3 | SHUTOFF | -          | Shutdown    | demo_net=50.0.0.11, 172.24.4.228 |
| cfd735ea-d9a8-4c4e-9a77-03035f01d443 | VF22DEVS  | 7e5a0f3ec3fe45dc83ae0947ef52adc3 | ACTIVE  | -          | Running     | demo_net=50.0.0.14, 172.24.4.231 |
+--------------------------------------+-----------+----------------------------------+---------+------------+-------------+----------------------------------+

[root@ip-192-169-142-127 ~(keystone_admin)]# nova get-spice-console cfd735ea-d9a8-4c4e-9a77-03035f01d443  spice-html5

+-------------+----------------------------------------------------------------------------------------+
| Type        | Url                                                                                    |
+-------------+----------------------------------------------------------------------------------------+
| spice-html5 | http://192.169.142.137:6082/spice_auto.html?token=24fb65c7-e7e9-4727-bad3-ba7c2c29f7f4 |
+-------------+----------------------------------------------------------------------------------------+

  

   

     Session running by virt-manager on Virtualization Host ( F22 )   
   Connection to Compute Node 192.169.142.137 has been activated

  

 Active VM features :-


Actually , not much spice benefits enabled , just QXL video mode

[root@fedora22wks 2b75c461-fbe0-4527-a031-08d2e729db91]# pwd
/var/lib/nova/instances/2b75c461-fbe0-4527-a031-08d2e729db91

[root@fedora22wks 2b75c461-fbe0-4527-a031-08d2e729db91]# cat libvirt.xml

<domain type="kvm">
  <uuid>2b75c461-fbe0-4527-a031-08d2e729db91</uuid>
  <name>instance-00000003</name>
  <memory>2097152</memory>
  <vcpu cpuset="0-3">1</vcpu>
  <metadata>
    <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
      <nova:package version="2015.1.0-3.fc23"/>
      <nova:name>VF22Devs</nova:name>
      <nova:creationTime>2015-06-06 16:50:07</nova:creationTime>
      <nova:flavor name="m1.small">
        <nova:memory>2048</nova:memory>
        <nova:disk>20</nova:disk>
        <nova:swap>0</nova:swap>
        <nova:ephemeral>0</nova:ephemeral>
        <nova:vcpus>1</nova:vcpus>
      </nova:flavor>
      <nova:owner>
        <nova:user uuid="6a89f1e00f554e37b3c288f20daa34ec">demo</nova:user>
        <nova:project uuid="22cd2b8ca101493ba621c1656141cea6">demo</nova:project>
      </nova:owner>
      <nova:root type="image" uuid="19c62e6f-527e-4e4a-b84a-c92f8caa7334"/>
    </nova:instance>
  </metadata>
  <sysinfo type="smbios">
    <system>
      <entry name="manufacturer">Fedora Project</entry>
      <entry name="product">OpenStack Nova</entry>
      <entry name="version">2015.1.0-3.fc23</entry>
      <entry name="serial">75cbcf76-d9ef-479e-8f2e-99b89adfc667</entry>
      <entry name="uuid">2b75c461-fbe0-4527-a031-08d2e729db91</entry>
    </system>
  </sysinfo>
  <os>
    <type>hvm</type>
    <boot dev="hd"/>
    <smbios mode="sysinfo"/>
  </os>
  <features>
    <acpi/>
    <apic/>
  </features>
  <cputune>
    <shares>1024</shares>
  </cputune>
  <clock offset="utc">
    <timer name="pit" tickpolicy="delay"/>
    <timer name="rtc" tickpolicy="catchup"/>
    <timer name="hpet" present="no"/>
  </clock>
  <cpu mode="host-model" match="exact">
    <topology sockets="1" cores="1" threads="1"/>
  </cpu>
  <devices>
    <disk type="file" device="disk">
      <driver name="qemu" type="qcow2" cache="none"/>
      <source file="/var/lib/nova/instances/2b75c461-fbe0-4527-a031-08d2e729db91/disk"/>
      <target bus="virtio" dev="vda"/>
    </disk>
    <interface type="bridge">
      <mac address="fa:16:3e:20:b9:4f"/>
      <model type="virtio"/>
      <source bridge="qbr8af1434b-25"/>
      <target dev="tap8af1434b-25"/>
    </interface>
    <serial type="file">
      <source path="/var/lib/nova/instances/2b75c461-fbe0-4527-a031-08d2e729db91/console.log"/>
    </serial>
    <serial type="pty"/>
    <channel type="pty">
      <target type="virtio" name="com.redhat.spice.0"/>
    </channel>
    <graphics type="spice" autoport="yes" keymap="en-us" listen="0.0.0.0"/>
    <video>
      <model type="qxl"/>
    </video>
    <memballoon model="virtio">
      <stats period="10"/>
    </memballoon>
  </devices>
</domain>



   References
   1.  http://blog.felipe-alfaro.com/2014/05/13/html5-spice-console-in-openstack/
   2.  https://www.rdoproject.org/Neutron_with_existing_external_network

Friday, May 29, 2015

RDO Kilo Set up for three F22 VM Nodes Controller&Network&Compute (ML2&OVS&VXLAN)

************************
UPDATE 07/12/2015
************************
  During last month procedure of RDO Kilo install has been significantly changed.
View details here: "Switching to Dashboard Spice Console in RDO Kilo on Fedora 22". Patching per Javier Pena is no longer required. The mentioned install now
requires `dnf --enablerepo=rawhide update openstack-packstack`.
View also https://www.redhat.com/archives/rdo-list/2015-July/msg00002.html
section about  "Switching to Dashboard Spice in RDO Kilo on Fedora 22"
  The most recent version ( as of time of writing ) of openstack-packstack in rawhide is  2015.1 Release 0.8.dev1589.g1d6372f.fc23
http://arm.koji.fedoraproject.org/koji/buildinfo?buildID=294991

In the meantime, to get the schema below working, I was forced to create a Libvirt subnet
emulating the external network, like:-

# cat public.xml
 <network>
   <name>public</name>
   <uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr2' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='192.168.174.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='192.168.174.2' end='192.168.174.254' />
     </dhcp>
   </ip>
</network>

In other words, the network should be (XXX.XXX.XXX.0/24) with gateway
(XXX.XXX.XXX.1).
If you are concerned about details, see :-
https://www.redhat.com/archives/rdo-list/2015-July/msg00040.html

****************
END UPDATE
****************
     Following below is a brief instruction for a three node deployment test (Controller&&Network&&Compute) across Fedora 22 VMs for RDO Kilo, which was performed on a Fedora 22 host with QEMU/KVM/Libvirt Hypervisor (16 GB RAM, Intel Core i7-4771 Haswell CPU, ASUS Z97-P).
    Three VMs (4 GB RAM,4 VCPUS)  have been setup. Controller VM one (management subnet) VNIC, Network Node VM three VNICS (management,vtep's external subnets), Compute Node VM two VNICS (management,vtep's subnets)

SELINUX was converted to permissive mode on all deployment nodes.

Actually, a straightforward install of RDO Kilo on F22 crashes due to a relatively simple puppet mistake. A workaround for this issue was recently suggested by Javier Pena.
1. Manually switch to testing repo after :-
   yum install -y https://rdoproject.org/repos/rdo-release.rpm
2.Then :-  yum install -y openstack-packstack

3. Start packstack for multinode deployment as normal to get the files that require updates.

After first packstack crash update  /usr/share/ruby/vendor_ruby/puppet/provider/service/systemd.rb to include "22" (in quite obvious place)  on all deployment nodes. Restart packstack multi node deployment.

Expect one more packstack crash , then respond :-
   [root@fedora22wks ~]# systemctl start target
   [root@fedora22wks ~]# systemctl enable  target
restart packstack --answer-file=./answer3Node.txt

I avoid using the default libvirt subnet 192.168.122.0/24 for any purposes related
to VMs serving as RDO Kilo nodes; for some reason it causes network congestion when forwarding packets to the Internet and vice versa.
 

Three Libvirt networks created

# cat openstackvms.xml
<network>
   <name>openstackvms</name>
   <uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr1' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='192.169.142.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='192.169.142.2' end='192.169.142.254' />
     </dhcp>
   </ip>
 </network>

# cat public.xml
<network>
   <name>public</name>
   <uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr2' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='172.24.4.225' netmask='255.255.255.240'>
     <dhcp>
       <range start='172.24.4.226' end='172.24.4.238' />
     </dhcp>
   </ip>
 </network>

# cat vteps.xml
<network>
   <name>vteps</name>
   <uuid>d0e9966c-f93d-40c2-b759-c610bfe53df3</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr3' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='10.0.0.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='10.0.0.2' end='10.0.0.254' />
     </dhcp>
   </ip>
 </network>

# virsh net-list
 Name                 State      Autostart     Persistent
--------------------------------------------------------------------------
 default               active        yes           yes
 openstackvms    active        yes           yes
 public                active        yes           yes
 vteps                 active         yes          yes


*********************************************************************************
1. First Libvirt subnet "openstackvms"  serves as management network.
All 3 VM are attached to this subnet
**********************************************************************************
2. The second Libvirt subnet "public" serves for simulating the external network. The Network Node is attached to "public"; later on, the "eth2" interface (which belongs to "public") is supposed to be converted into an OVS port of br-ex on the Network Node. This Libvirt subnet, via bridge virbr2 (172.24.4.225), provides VMs running on the Compute Node access to the Internet, since it matches the external network 172.24.4.224/28 created by the packstack installation.

  


*************************************************
On Hypervisor Host ( Fedora 22)
*************************************************
# iptables -S -t nat 
. . . . . .
-A POSTROUTING -s 172.24.4.224/28 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 172.24.4.224/28 ! -d 172.24.4.224/28 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 172.24.4.224/28 ! -d 172.24.4.224/28 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 172.24.4.224/28 ! -d 172.24.4.224/28 -j MASQUERADE
. . . . . .
***********************************************************************************
3. Third Libvirt subnet "vteps" serves  for VTEPs endpoint simulation. Network and Compute Node VMs are attached to this subnet.
********************************************************************************


************************************
Answer-file - answer3Node.txt
************************************
[root@ip-192-169-142-127 ~(keystone_admin)]# cat answer3Node.txt
[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137
CONFIG_NETWORK_HOSTS=192.169.142.147
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.169.142.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.169.142.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.169.142.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
# Here 2 options available
CONFIG_KEYSTONE_SERVICE_NAME=httpd
# CONFIG_KEYSTONE_SERVICE_NAME=keystone
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=10G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=eth0
CONFIG_NOVA_NETWORK_PRIVIF=eth1
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.169.142.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4


**********************************************************************************
Upon packstack completion, create the following files on the Network Node,
designed to match the external network created by the installer
**********************************************************************************

[root@ip-192-169-142-147 network-scripts]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.24.4.232"
NETMASK="255.255.255.240"
DNS1="83.221.202.254"
BROADCAST="172.24.4.239"
GATEWAY="172.24.4.225"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no


[root@ip-192-169-142-147 network-scripts]# cat ifcfg-eth2
DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

*************************************************
Next steps to be performed on the Network Node :-
*************************************************
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
#reboot

*************************************************
General Three node RDO Kilo system layout
*************************************************



***********************
 Controller Node
***********************
[root@ip-192-169-142-127 neutron(keystone_admin)]# cat /etc/neutron/plugins/ml2/ml2_conf.ini| grep -v ^# | grep -v ^$
[ml2]
type_drivers = vxlan
tenant_network_types = vxlan
mechanism_drivers =openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges =1001:2000
vxlan_group =239.1.1.2
[securitygroup]
enable_security_group = True

   


   Network Node


*********************
Network Node
*********************
[root@ip-192-169-142-147 openvswitch(keystone_admin)]# cat ovs_neutron_plugin.ini | grep -v ^$| grep -v ^#
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.147
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = False
arp_responder = False
enable_distributed_routing = False
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver



********************
Compute Node
*******************
[root@ip-192-169-142-137 openvswitch(keystone_admin)]# cat ovs_neutron_plugin.ini | grep -v ^$| grep -v ^#
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.137
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = False
arp_responder = False
enable_distributed_routing = False
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

   


   For some reason virt-manager doesn't allow setting a remote connection to a Spice
   session running locally on the F22 Virtualization Host 192.168.1.95.

   So from remote Fedora host run :-
    
  # ssh -L 5900:127.0.0.1:5900 -N -f -l root 192.168.1.95
    # ssh -L 5901:127.0.0.1:5901 -N -f -l root 192.168.1.95
  # ssh -L 5902:127.0.0.1:5902 -N -f -l root 192.168.1.95

  Then spicy installed on remote host would connect

   1)  to VM 192.169.142.127
        $ spicy -h localhost -p 5902  
   2)  to VM 192.169.142.147
        $ spicy -h localhost -p 5901
   3) to VM 192.169.142.137
        $ spicy -h localhost -p 5900
   


   Dashboard snapshots