UPDATE 10/23/2015
Post updated for final RDO Liberty Release
END UPDATE
Per http://specs.openstack.org/openstack/neutron-specs/specs/juno/neutron-ovs-dvr.html
DVR is supposed to address the following problems inherent in the traditional 3 Node
deployment schema:-
Problem 1: Intra VM traffic flows through the Network Node
In this case even VMs traffic that belong to the same tenant
on a different subnet has to hit the Network Node to get routed
between the subnets. This would affect Performance.
Problem 2: VMs with FloatingIP also receive and send packets
through the Network Node Routers.
FloatingIP (DNAT) translation done at the Network Node and also
the external network gateway port is available only at the Network Node.
So any traffic that is intended for the External Network from
the VM will have to go through the Network Node.
In this case the Network Node becomes a single point of failure
and also the traffic load will be heavy in the Network Node.
This would affect the performance and scalability.
Setup configuration
- Controller node: Nova, Keystone, Cinder, Glance,
Neutron (using Open vSwitch plugin && VXLAN )
- (2x) Compute node: Nova (nova-compute),
Neutron (openvswitch-agent,l3-agent,metadata-agent )
Three CentOS 7.1 VMs (4 GB RAM, 4 VCPU, 2 VNICs ) have been built for testing
at Fedora 22 KVM Hypervisor. Two libvirt sub-nets were used first "openstackvms" for emulating External && Mgmt Networks 192.169.142.0/24 gateway virbr1 (192.169.142.1) and "vteps" 10.0.0.0/24 to support two VXLAN tunnels between Controller and Compute Nodes.
# cat openstackvms.xml
<network>
<name>openstackvms</name>
<uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr1' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='192.169.142.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.169.142.2' end='192.169.142.254' />
</dhcp>
</ip>
</network>
# cat vteps.xml
<network>
<name>vteps</name>
<uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr2' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='10.0.0.1' netmask='255.255.255.0'>
<dhcp>
<range start='10.0.0.2' end='10.0.0.254' />
</dhcp>
</ip>
</network>
# virsh net-define openstackvms.xml
# virsh net-start openstackvms
# virsh net-autostart openstackvms
Second libvirt sub-net may be defined and started the same way.
ip-192-169-142-127.ip.secureserver.net - Controller/Network Node
ip-192-169-142-137.ip.secureserver.net - Compute Node
ip-192-169-142-147.ip.secureserver.net - Compute Node
**************************************
At this point run on Controller:-
**************************************
# yum -y install centos-release-openstack-liberty
# yum -y install openstack-packstack
# packstack --answer-file=./answer3Node.txt
Answer File :-
*********************
[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137,192.169.142.147
CONFIG_NETWORK_HOSTS=192.169.142.127
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.169.142.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.169.142.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.169.142.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
CONFIG_KEYSTONE_SERVICE_NAME=httpd
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=5G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=eth0
CONFIG_NOVA_NETWORK_PRIVIF=eth1
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.169.142.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4
********************************************************
On Controller (X=2) and Computes X=(3,4) update :-
********************************************************
# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.169.142.1(X)7"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="192.169.142.255"
GATEWAY="192.169.142.1"
NM_CONTROLLED="no"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
ONBOOT="yes"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
# cat ifcfg-eth0
DEVICE="eth0"
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no
***********
Then
***********
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# service network restart
Reboot
**********************************
General information ( [3] )
**********************************
Enabling l2pop :-
On the Neutron API node, in the conf file you pass
to the Neutron service (plugin.ini/ml2_conf.ini):
[ml2]
mechanism_drivers = openvswitch,l2population
On each compute node, in the conf file you pass
to the OVS agent (plugin.ini/ml2_conf.ini):
[agent]
l2_population = True
Enable the ARP responder:
On each compute node, in the conf file
you pass to the OVS agent (plugin.ini/ml2_conf.ini):
[agent]
arp_responder = True
*****************************************
On Controller update neutron.conf
*****************************************
router_distributed = True
dvr_base_mac = fa:16:3f:00:00:00
[root@ip-192-169-142-127 neutron(keystone_admin)]# cat l3_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
debug = False
interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
external_network_bridge = br-ex
metadata_port = 9697
send_arp_for_ha = 3
periodic_interval = 40
periodic_fuzzy_delay = 5
enable_metadata_proxy = True
router_delete_namespaces = False
agent_mode = dvr_snat
[AGENT]
*********************************
On each Compute Node
*********************************
[root@ip-192-169-142-147 neutron]# cat l3_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
agent_mode = dvr
[AGENT]
[root@ip-192-169-142-147 neutron]# cat metadata_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
debug = False
auth_url = http://192.169.142.127:5000/v2.0
auth_region = RegionOne
auth_insecure = False
admin_tenant_name = services
admin_user = neutron
admin_password = 808e36e154bd4cee
nova_metadata_ip = 192.169.142.127
nova_metadata_port = 8775
nova_metadata_protocol = http
metadata_proxy_shared_secret =a965cd23ed2f4502
metadata_workers =4
metadata_backlog = 4096
cache_url = memory://?default_ttl=5
[AGENT]
[root@ip-192-169-142-147 ml2]# pwd
/etc/neutron/plugins/ml2
[root@ip-192-169-142-147 ml2]# cat ml2_conf.ini | grep -v ^$ | grep -v ^#
[ml2]
type_drivers = vxlan
tenant_network_types = vxlan
mechanism_drivers =openvswitch,l2population
path_mtu = 0
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges =1001:2000
vxlan_group =239.1.1.2
[ml2_type_geneve]
[securitygroup]
enable_security_group = True
# On Compute nodes
[agent]
l2_population=True
********************************************************************************
Please be advised that a command like ( [ 2 ] ) :-
# rsync -av root@192.169.142.127:/etc/neutron/plugins/ml2 /etc/neutron/plugins
been run on Liberty Compute Node 192.169.142.147 will overwrite file
/etc/neutron/plugins/ml2/openvswitch_agent.ini
So, local_ip after this command should be turned back to its initial value.
********************************************************************************
[root@ip-192-169-142-147 ml2]# cat openvswitch_agent.ini | grep -v ^#|grep -v ^$
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.147
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = True
arp_responder = True
enable_distributed_routing = True
drop_flows_on_start=False
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
***************************************************************************************
On each Compute node neutron-l3-agent and neutron-metadata-agent are
supposed to be started.
***************************************************************************************
# yum install openstack-neutron-ml2
# systemctl start neutron-l3-agent
# systemctl start neutron-metadata-agent
# systemctl enable neutron-l3-agent
# systemctl enable neutron-metadata-agent
[root@ip-192-169-142-147 ~]# systemctl | grep openstack
openstack-ceilometer-compute.service loaded active running OpenStack ceilometer compute agent
openstack-nova-compute.service loaded active running OpenStack Nova Compute Server
[root@ip-192-169-142-147 ~]# systemctl | grep neutron
neutron-l3-agent.service loaded active running OpenStack Neutron Layer 3 Agent
neutron-metadata-agent.service loaded active running OpenStack Neutron Metadata Agent
neutron-openvswitch-agent.service loaded active running OpenStack Neutron Open vSwitch Agent
neutron-ovs-cleanup.service loaded active exited OpenStack Neutron Open vSwitch Cleanup Utility
********************************************************************************************
When floating IP gets assigned to VM , what actually happens ( [1] ):
The same explanation may be found in ([4]); the only difference is that it is not written in a step-by-step manner. In particular, it contains a detailed description of the reverse
network flow and the ARP Proxy functionality.
********************************************************************************************
1.The fip-<netid> namespace is created on the local
compute node (if it does not already exist)
2.A new port rfp-<portid> gets created on the qrouter-<routerid>
namespace (if it does not already exist)
3.The rfp port on the qrouter namespace is assigned the associated floating IP
address
4.The fpr port on the fip namespace gets created and linked via point-to-point
network to the rfp port of the qrouter namespace
5.The fip namespace gateway port fg-<portid> is assigned an additional
address from the public network range to set up ARP proxy point
6.The fg-<portid> is configured as a Proxy ARP
***************************************
Network flow itself ( [1] ):
***************************************
1.The VM, initiating transmission, sends a packet via default gateway
and br-int forwards the traffic to the local DVR gateway port (qr-<portid>).
2.DVR routes the packet using the routing table to the rfp-<portid> port
3.The packet is applied NAT rule, replacing the source-IP of VM to
the assigned floating IP, and then it gets sent through the rfp-<portid>
port, which connects to the fip namespace via point-to-point network
169.254.31.28/31
4.The packet is received on the fpr-<portid> port in the fip namespace
and then routed outside through the fg-<portid> port
*********************************************************
In case of particular deployment :-
*********************************************************
[root@ip-192-169-142-147 ~(keystone_admin)]# neutron net-list
+--------------------------------------+--------------+-------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+--------------+-------------------------------------------------------+
| 1b202547-e1de-4c35-86a9-3119d6844f88 | public | e6473e85-5a4c-4eea-a42b-3a63def678c5 192.169.142.0/24 |
| 267c9192-29e2-41e2-8db4-826a6155dec9 | demo_network | 89704ab3-5535-4c87-800e-39255a0a11d9 50.0.0.0/24 |
+--------------------------------------+--------------+------------------------------------------
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns
fip-1b202547-e1de-4c35-86a9-3119d6844f88
qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf
[root@ip-192-169-142-147 ~]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip rule
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
57480: from 50.0.0.15 lookup 16
57481: from 50.0.0.13 lookup 16
838860801: from 50.0.0.1/24 lookup 838860801
[root@ip-192-169-142-147 ~]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip route show table 16
default via 169.254.31.29 dev rfp-51ed47a7-3
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip route
50.0.0.0/24 dev qr-b0a8a232-ab proto kernel scope link src 50.0.0.1
169.254.31.28/31 dev rfp-51ed47a7-3 proto kernel scope link src 169.254.31.28
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf iptables-save -t nat | grep "^-A"|grep l3-agent
-A PREROUTING -j neutron-l3-agent-PREROUTING
-A OUTPUT -j neutron-l3-agent-OUTPUT
-A POSTROUTING -j neutron-l3-agent-POSTROUTING
-A neutron-l3-agent-OUTPUT -d 192.169.142.153/32 -j DNAT --to-destination 50.0.0.13
-A neutron-l3-agent-OUTPUT -d 192.169.142.156/32 -j DNAT --to-destination 50.0.0.15
-A neutron-l3-agent-POSTROUTING ! -i rfp-51ed47a7-3 ! -o rfp-51ed47a7-3 -m conntrack ! --ctstate DNAT -j ACCEPT
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697
-A neutron-l3-agent-PREROUTING -d 192.169.142.153/32 -j DNAT --to-destination 50.0.0.13
-A neutron-l3-agent-PREROUTING -d 192.169.142.156/32 -j DNAT --to-destination 50.0.0.15
-A neutron-l3-agent-float-snat -s 50.0.0.13/32 -j SNAT --to-source 192.169.142.153
-A neutron-l3-agent-float-snat -s 50.0.0.15/32 -j SNAT --to-source 192.169.142.156
-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-A neutron-postrouting-bottom -m comment --comment "Perform source NAT on outgoing traffic." -j neutron-l3-agent-snat
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec fip-1b202547-e1de-4c35-86a9-3119d6844f88 ip route
default via 192.169.142.1 dev fg-58e0cabf-07
169.254.31.28/31 dev fpr-51ed47a7-3 proto kernel scope link src 169.254.31.29
192.169.142.0/24 dev fg-58e0cabf-07 proto kernel scope link src 192.169.142.154
192.169.142.153 via 169.254.31.28 dev fpr-51ed47a7-3
192.169.142.156 via 169.254.31.28 dev fpr-51ed47a7-3
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ifconfig
lo: flags=73 mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10
loop txqueuelen 0 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
qr-b0a8a232-ab: flags=4163 mtu 1500
inet 50.0.0.1 netmask 255.255.255.0 broadcast 50.0.0.255
inet6 fe80::f816:3eff:fe23:586c prefixlen 64 scopeid 0x20
ether fa:16:3e:23:58:6c txqueuelen 0 (Ethernet)
RX packets 88594 bytes 6742614 (6.4 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 173961 bytes 234594118 (223.7 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
rfp-51ed47a7-3: flags=4163 mtu 1500
inet 169.254.31.28 netmask 255.255.255.254 broadcast 0.0.0.0
inet6 fe80::282e:4bff:fe52:3bca prefixlen 64 scopeid 0x20
ether 2a:2e:4b:52:3b:ca txqueuelen 1000 (Ethernet)
RX packets 173514 bytes 234542852 (223.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 87837 bytes 6670792 (6.3 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@ip-192-169-142-147 ~(keystone_admin)]# ovs-vsctl show
fe2f4449-82fc-45e9-8827-6c6d9c8cc92d
Bridge br-int
fail_mode: secure
Port "qr-b0a8a232-ab"
tag: 1
Interface "qr-b0a8a232-ab"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port "qvo19855b4d-3b"
tag: 1
Interface "qvo19855b4d-3b"
Port int-br-ex
Interface int-br-ex
type: patch
options: {peer=phy-br-ex}
Port br-int
Interface br-int
type: internal
Port "qvobd487c99-41"
tag: 1
Interface "qvobd487c99-41"
Bridge br-ex
Port "fg-58e0cabf-07"
Interface "fg-58e0cabf-07"
type: internal
Port "eth0"
Interface "eth0"
Port br-ex
Interface br-ex
type: internal
Port phy-br-ex
Interface phy-br-ex
type: patch
options: {peer=int-br-ex}
Bridge br-tun
fail_mode: secure
Port "vxlan-0a00007f"
Interface "vxlan-0a00007f"
type: vxlan
options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.127"}
Port "vxlan-0a000089"
Interface "vxlan-0a000089"
type: vxlan
options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.137"}
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
ovs_version: "2.4.0"
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec fip-1b202547-e1de-4c35-86a9-3119d6844f88 ifconfig
fg-58e0cabf-07: flags=4163 mtu 1500
inet 192.169.142.154 netmask 255.255.255.0 broadcast 192.169.142.255
inet6 fe80::f816:3eff:fe15:efff prefixlen 64 scopeid 0x20
ether fa:16:3e:15:ef:ff txqueuelen 0 (Ethernet)
RX packets 173587 bytes 234547834 (223.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 87751 bytes 6665500 (6.3 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
fpr-51ed47a7-3: flags=4163 mtu 1500
inet 169.254.31.29 netmask 255.255.255.254 broadcast 0.0.0.0
inet6 fe80::a805:e5ff:fe38:3bb1 prefixlen 64 scopeid 0x20
ether aa:05:e5:38:3b:b1 txqueuelen 1000 (Ethernet)
RX packets 87841 bytes 6671008 (6.3 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 173518 bytes 234543068 (223.6 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73 mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10
loop txqueuelen 0 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
****************
On Controller
****************
Post updated for final RDO Liberty Release
END UPDATE
Per http://specs.openstack.org/openstack/neutron-specs/specs/juno/neutron-ovs-dvr.html
DVR is supposed to address the following problems inherent in the traditional 3 Node
deployment schema:-
Problem 1: Intra VM traffic flows through the Network Node
In this case even VMs traffic that belong to the same tenant
on a different subnet has to hit the Network Node to get routed
between the subnets. This would affect Performance.
Problem 2: VMs with FloatingIP also receive and send packets
through the Network Node Routers.
FloatingIP (DNAT) translation done at the Network Node and also
the external network gateway port is available only at the Network Node.
So any traffic that is intended for the External Network from
the VM will have to go through the Network Node.
In this case the Network Node becomes a single point of failure
and also the traffic load will be heavy in the Network Node.
This would affect the performance and scalability.
Setup configuration
- Controller node: Nova, Keystone, Cinder, Glance,
Neutron (using Open vSwitch plugin && VXLAN )
- (2x) Compute node: Nova (nova-compute),
Neutron (openvswitch-agent,l3-agent,metadata-agent )
Three CentOS 7.1 VMs (4 GB RAM, 4 VCPU, 2 VNICs ) have been built for testing
at Fedora 22 KVM Hypervisor. Two libvirt sub-nets were used first "openstackvms" for emulating External && Mgmt Networks 192.169.142.0/24 gateway virbr1 (192.169.142.1) and "vteps" 10.0.0.0/24 to support two VXLAN tunnels between Controller and Compute Nodes.
# cat openstackvms.xml
<network>
<name>openstackvms</name>
<uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr1' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='192.169.142.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.169.142.2' end='192.169.142.254' />
</dhcp>
</ip>
</network>
# cat vteps.xml
<network>
<name>vteps</name>
<uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr2' stp='on' delay='0' />
<mac address='52:54:00:60:f8:6d'/>
<ip address='10.0.0.1' netmask='255.255.255.0'>
<dhcp>
<range start='10.0.0.2' end='10.0.0.254' />
</dhcp>
</ip>
</network>
# virsh net-define openstackvms.xml
# virsh net-start openstackvms
# virsh net-autostart openstackvms
Second libvirt sub-net may be defined and started the same way.
ip-192-169-142-127.ip.secureserver.net - Controller/Network Node
ip-192-169-142-137.ip.secureserver.net - Compute Node
ip-192-169-142-147.ip.secureserver.net - Compute Node
**************************************
At this point run on Controller:-
**************************************
# yum -y install centos-release-openstack-liberty
# yum -y install openstack-packstack
# packstack --answer-file=./answer3Node.txt
*********************
Answer File :-
*********************
[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137,192.169.142.147
CONFIG_NETWORK_HOSTS=192.169.142.127
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.169.142.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.169.142.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.169.142.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
CONFIG_KEYSTONE_SERVICE_NAME=httpd
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=5G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=eth0
CONFIG_NOVA_NETWORK_PRIVIF=eth1
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.169.142.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4
********************************************************
On Controller (X=2) and Computes X=(3,4) update :-
********************************************************
# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="192.169.142.1(X)7"
NETMASK="255.255.255.0"
DNS1="83.221.202.254"
BROADCAST="192.169.142.255"
GATEWAY="192.169.142.1"
NM_CONTROLLED="no"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
ONBOOT="yes"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
# cat ifcfg-eth0
DEVICE="eth0"
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no
***********
Then
***********
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# service network restart
Reboot
**********************************
General information ( [3] )
**********************************
Enabling l2pop :-
On the Neutron API node, in the conf file you pass
to the Neutron service (plugin.ini/ml2_conf.ini):
[ml2]
mechanism_drivers = openvswitch,l2population
On each compute node, in the conf file you pass
to the OVS agent (plugin.ini/ml2_conf.ini):
[agent]
l2_population = True
Enable the ARP responder:
On each compute node, in the conf file
you pass to the OVS agent (plugin.ini/ml2_conf.ini):
[agent]
arp_responder = True
*****************************************
On Controller update neutron.conf
*****************************************
router_distributed = True
dvr_base_mac = fa:16:3f:00:00:00
[root@ip-192-169-142-127 neutron(keystone_admin)]# cat l3_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
debug = False
interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
external_network_bridge = br-ex
metadata_port = 9697
send_arp_for_ha = 3
periodic_interval = 40
periodic_fuzzy_delay = 5
enable_metadata_proxy = True
router_delete_namespaces = False
agent_mode = dvr_snat
[AGENT]
*********************************
On each Compute Node
*********************************
[root@ip-192-169-142-147 neutron]# cat l3_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
agent_mode = dvr
[AGENT]
[root@ip-192-169-142-147 neutron]# cat metadata_agent.ini | grep -v ^#| grep -v ^$
[DEFAULT]
debug = False
auth_url = http://192.169.142.127:5000/v2.0
auth_region = RegionOne
auth_insecure = False
admin_tenant_name = services
admin_user = neutron
admin_password = 808e36e154bd4cee
nova_metadata_ip = 192.169.142.127
nova_metadata_port = 8775
nova_metadata_protocol = http
metadata_proxy_shared_secret =a965cd23ed2f4502
metadata_workers =4
metadata_backlog = 4096
cache_url = memory://?default_ttl=5
[AGENT]
[root@ip-192-169-142-147 ml2]# pwd
/etc/neutron/plugins/ml2
[root@ip-192-169-142-147 ml2]# cat ml2_conf.ini | grep -v ^$ | grep -v ^#
[ml2]
type_drivers = vxlan
tenant_network_types = vxlan
mechanism_drivers =openvswitch,l2population
path_mtu = 0
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges =1001:2000
vxlan_group =239.1.1.2
[ml2_type_geneve]
[securitygroup]
enable_security_group = True
# On Compute nodes
[agent]
l2_population=True
********************************************************************************
Please be advised that a command like ( [ 2 ] ) :-
# rsync -av root@192.169.142.127:/etc/neutron/plugins/ml2 /etc/neutron/plugins
when run on Liberty Compute Node 192.169.142.147, will overwrite the file
/etc/neutron/plugins/ml2/openvswitch_agent.ini
So, after this command, local_ip should be turned back to its initial value.
********************************************************************************
[root@ip-192-169-142-147 ml2]# cat openvswitch_agent.ini | grep -v ^#|grep -v ^$
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.147
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = True
arp_responder = True
enable_distributed_routing = True
drop_flows_on_start=False
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
***************************************************************************************
On each Compute node neutron-l3-agent and neutron-metadata-agent are
supposed to be started.
***************************************************************************************
# yum install openstack-neutron-ml2
# systemctl start neutron-l3-agent
# systemctl start neutron-metadata-agent
# systemctl enable neutron-l3-agent
# systemctl enable neutron-metadata-agent
[root@ip-192-169-142-147 ~]# systemctl | grep openstack
openstack-ceilometer-compute.service loaded active running OpenStack ceilometer compute agent
openstack-nova-compute.service loaded active running OpenStack Nova Compute Server
[root@ip-192-169-142-147 ~]# systemctl | grep neutron
neutron-l3-agent.service loaded active running OpenStack Neutron Layer 3 Agent
neutron-metadata-agent.service loaded active running OpenStack Neutron Metadata Agent
neutron-openvswitch-agent.service loaded active running OpenStack Neutron Open vSwitch Agent
neutron-ovs-cleanup.service loaded active exited OpenStack Neutron Open vSwitch Cleanup Utility
********************************************************************************************
When floating IP gets assigned to VM , what actually happens ( [1] ):
The same explanation may be found in ([4]); only the style is not step-by-step — in particular, it contains a detailed description of the reverse
network flow and the ARP Proxy functionality.
********************************************************************************************
1.The fip-<netid> namespace is created on the local
compute node (if it does not already exist)
2.A new port rfp-<portid> gets created on the qrouter-<routerid>
namespace (if it does not already exist)
3.The rfp port on the qrouter namespace is assigned the associated floating IP
address
4.The fpr port on the fip namespace gets created and linked via point-to-point
network to the rfp port of the qrouter namespace
5.The fip namespace gateway port fg-<portid> is assigned an additional
address from the public network range to set up ARP proxy point
6.The fg-<portid> is configured as a Proxy ARP
***************************************
Network flow itself ( [1] ):
***************************************
1.The VM, initiating transmission, sends a packet via default gateway
and br-int forwards the traffic to the local DVR gateway port (qr-<portid>).
2.DVR routes the packet using the routing table to the rfp-<portid> port
3.The packet is applied NAT rule, replacing the source-IP of VM to
the assigned floating IP, and then it gets sent through the rfp-<portid>
port, which connects to the fip namespace via point-to-point network
169.254.31.28/31
4.The packet is received on the fpr-<portid> port in the fip namespace
and then routed outside through the fg-<portid> port
*********************************************************
In case of particular deployment :-
*********************************************************
[root@ip-192-169-142-147 ~(keystone_admin)]# neutron net-list
+--------------------------------------+--------------+-------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+--------------+-------------------------------------------------------+
| 1b202547-e1de-4c35-86a9-3119d6844f88 | public | e6473e85-5a4c-4eea-a42b-3a63def678c5 192.169.142.0/24 |
| 267c9192-29e2-41e2-8db4-826a6155dec9 | demo_network | 89704ab3-5535-4c87-800e-39255a0a11d9 50.0.0.0/24 |
+--------------------------------------+--------------+------------------------------------------
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns
fip-1b202547-e1de-4c35-86a9-3119d6844f88
qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf
[root@ip-192-169-142-147 ~]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip rule
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
57480: from 50.0.0.15 lookup 16
57481: from 50.0.0.13 lookup 16
838860801: from 50.0.0.1/24 lookup 838860801
[root@ip-192-169-142-147 ~]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip route show table 16
default via 169.254.31.29 dev rfp-51ed47a7-3
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ip route
50.0.0.0/24 dev qr-b0a8a232-ab proto kernel scope link src 50.0.0.1
169.254.31.28/31 dev rfp-51ed47a7-3 proto kernel scope link src 169.254.31.28
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf iptables-save -t nat | grep "^-A"|grep l3-agent
-A PREROUTING -j neutron-l3-agent-PREROUTING
-A OUTPUT -j neutron-l3-agent-OUTPUT
-A POSTROUTING -j neutron-l3-agent-POSTROUTING
-A neutron-l3-agent-OUTPUT -d 192.169.142.153/32 -j DNAT --to-destination 50.0.0.13
-A neutron-l3-agent-OUTPUT -d 192.169.142.156/32 -j DNAT --to-destination 50.0.0.15
-A neutron-l3-agent-POSTROUTING ! -i rfp-51ed47a7-3 ! -o rfp-51ed47a7-3 -m conntrack ! --ctstate DNAT -j ACCEPT
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697
-A neutron-l3-agent-PREROUTING -d 192.169.142.153/32 -j DNAT --to-destination 50.0.0.13
-A neutron-l3-agent-PREROUTING -d 192.169.142.156/32 -j DNAT --to-destination 50.0.0.15
-A neutron-l3-agent-float-snat -s 50.0.0.13/32 -j SNAT --to-source 192.169.142.153
-A neutron-l3-agent-float-snat -s 50.0.0.15/32 -j SNAT --to-source 192.169.142.156
-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-A neutron-postrouting-bottom -m comment --comment "Perform source NAT on outgoing traffic." -j neutron-l3-agent-snat
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec fip-1b202547-e1de-4c35-86a9-3119d6844f88 ip route
default via 192.169.142.1 dev fg-58e0cabf-07
169.254.31.28/31 dev fpr-51ed47a7-3 proto kernel scope link src 169.254.31.29
192.169.142.0/24 dev fg-58e0cabf-07 proto kernel scope link src 192.169.142.154
192.169.142.153 via 169.254.31.28 dev fpr-51ed47a7-3
192.169.142.156 via 169.254.31.28 dev fpr-51ed47a7-3
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-51ed47a7-3fcf-4389-9961-0b457e10cecf ifconfig
lo: flags=73
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10
loop txqueuelen 0 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
qr-b0a8a232-ab: flags=4163
inet 50.0.0.1 netmask 255.255.255.0 broadcast 50.0.0.255
inet6 fe80::f816:3eff:fe23:586c prefixlen 64 scopeid 0x20
ether fa:16:3e:23:58:6c txqueuelen 0 (Ethernet)
RX packets 88594 bytes 6742614 (6.4 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 173961 bytes 234594118 (223.7 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
rfp-51ed47a7-3: flags=4163
inet 169.254.31.28 netmask 255.255.255.254 broadcast 0.0.0.0
inet6 fe80::282e:4bff:fe52:3bca prefixlen 64 scopeid 0x20
ether 2a:2e:4b:52:3b:ca txqueuelen 1000 (Ethernet)
RX packets 173514 bytes 234542852 (223.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 87837 bytes 6670792 (6.3 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@ip-192-169-142-147 ~(keystone_admin)]# ovs-vsctl show
fe2f4449-82fc-45e9-8827-6c6d9c8cc92d
Bridge br-int
fail_mode: secure
Port "qr-b0a8a232-ab"
tag: 1
Interface "qr-b0a8a232-ab"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port "qvo19855b4d-3b"
tag: 1
Interface "qvo19855b4d-3b"
Port int-br-ex
Interface int-br-ex
type: patch
options: {peer=phy-br-ex}
Port br-int
Interface br-int
type: internal
Port "qvobd487c99-41"
tag: 1
Interface "qvobd487c99-41"
Bridge br-ex
Port "fg-58e0cabf-07"
Interface "fg-58e0cabf-07"
type: internal
Port "eth0"
Interface "eth0"
Port br-ex
Interface br-ex
type: internal
Port phy-br-ex
Interface phy-br-ex
type: patch
options: {peer=int-br-ex}
Bridge br-tun
fail_mode: secure
Port "vxlan-0a00007f"
Interface "vxlan-0a00007f"
type: vxlan
options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.127"}
Port "vxlan-0a000089"
Interface "vxlan-0a000089"
type: vxlan
options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.137"}
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
ovs_version: "2.4.0"
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec fip-1b202547-e1de-4c35-86a9-3119d6844f88 ifconfig
fg-58e0cabf-07: flags=4163
inet 192.169.142.154 netmask 255.255.255.0 broadcast 192.169.142.255
inet6 fe80::f816:3eff:fe15:efff prefixlen 64 scopeid 0x20
ether fa:16:3e:15:ef:ff txqueuelen 0 (Ethernet)
RX packets 173587 bytes 234547834 (223.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 87751 bytes 6665500 (6.3 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
fpr-51ed47a7-3: flags=4163
inet 169.254.31.29 netmask 255.255.255.254 broadcast 0.0.0.0
inet6 fe80::a805:e5ff:fe38:3bb1 prefixlen 64 scopeid 0x20
ether aa:05:e5:38:3b:b1 txqueuelen 1000 (Ethernet)
RX packets 87841 bytes 6671008 (6.3 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 173518 bytes 234543068 (223.6 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10
loop txqueuelen 0 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
****************
On Controller
****************