Sunday, April 20, 2014

Tap Interfaces & Namespaces (just a snapshot)

[root@dfw02 ~(keystone_admin)]$ ip netns list

qrouter-86b3008c-297f-4301-9bdc-766b839785f1
qrouter-bf360d81-79fb-4636-8241-0a843f228fc8
qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7
qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever


2: qr-e031db6b-d0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:83:90:90 brd ff:ff:ff:ff:ff:ff
    inet 40.0.0.1/24 brd 40.0.0.255 scope global qr-e031db6b-d0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe83:9090/64 scope link
       valid_lft forever preferred_lft forever


3: qg-9c090153-08: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:b2:24:bc brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.114/24 brd 192.168.1.255 scope global qg-9c090153-08
       valid_lft forever preferred_lft forever
    inet 192.168.1.104/32 brd 192.168.1.104 scope global qg-9c090153-08
       valid_lft forever preferred_lft forever
    inet 192.168.1.102/32 brd 192.168.1.102 scope global qg-9c090153-08
       valid_lft forever preferred_lft forever
    inet 192.168.1.105/32 brd 192.168.1.105 scope global qg-9c090153-08
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:feb2:24bc/64 scope link
       valid_lft forever preferred_lft forever

[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep e031db6b-d0
        Port "tape031db6b-d0"
            Interface "tape031db6b-d0"


[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep 9c090153-08
        Port "tap9c090153-08"
            Interface "tap9c090153-08"

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8 ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
 

2: qr-f933e768-42: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:6a:d3:f0 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-f933e768-42
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe6a:d3f0/64 scope link
       valid_lft forever preferred_lft forever
 

3: qg-54e34740-87: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:00:9a:0d brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.100/24 brd 192.168.1.255 scope global qg-54e34740-87
       valid_lft forever preferred_lft forever
    inet 192.168.1.103/32 brd 192.168.1.103 scope global qg-54e34740-87
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe00:9a0d/64 scope link
       valid_lft forever preferred_lft forever

[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep f933e768-42
        Port "tapf933e768-42"
            Interface "tapf933e768-42"
 

[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep  54e34740-87
        Port "tap54e34740-87"
            Interface "tap54e34740-87"

[root@dfw02 ~(keystone_admin)]$ ip netns list
qrouter-86b3008c-297f-4301-9bdc-766b839785f1
qrouter-bf360d81-79fb-4636-8241-0a843f228fc8
qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7
qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b

[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7 ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever


2: ns-40dd712c-e4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:93:44:f8 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.3/24 brd 10.0.0.255 scope global ns-40dd712c-e4
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe93:44f8/64 scope link
       valid_lft forever preferred_lft forever


[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep  40dd712c-e4
        Port "tap40dd712c-e4"
            Interface "tap40dd712c-e4"
[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b  ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever


2: ns-343b0090-24: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:01:8b:55 brd ff:ff:ff:ff:ff:ff
    inet 40.0.0.3/24 brd 40.0.0.255 scope global ns-343b0090-24
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe01:8b55/64 scope link
       valid_lft forever preferred_lft forever


[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show| grep  343b0090-24
        Port "tap343b0090-24"
            Interface "tap343b0090-24"
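
Note that each OVS tap device name is simply "tap" plus the first 11 characters of the corresponding Neutron port ID. A small sketch deriving the expected names (assuming admin credentials are sourced; not every listed port necessarily appears as a tap device on this node):

# expected tap name = "tap" + first 11 chars of the Neutron port ID
neutron port-list | awk '$2 ~ /^[0-9a-f]+-/ {print "tap" substr($2,1,11)}'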

******************************************************************************

[root@dfw02 ~(keystone_admin)]$ ovs-vsctl show
7d78d536-3612-416e-bce6-24605088212f
    Bridge br-ex
        Port "p37p1"
            Interface "p37p1"

        Port "tap54e34740-87"  <= qrouter-bf360d81-79fb-4636-8241-0a843f228fc8 
            Interface "tap54e34740-87"
        Port br-ex
            Interface br-ex
                type: internal
        Port "tap9c090153-08" <= qrouter-86b3008c-297f-4301-9bdc-766b839785f1
            Interface "tap9c090153-08"
    Bridge br-int
        Port br-int
            Interface br-int

                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "tape031db6b-d0" <= qrouter-86b3008c-297f-4301-9bdc-766b839785f1
            tag: 1
            Interface "tape031db6b-d0"
        Port "tapf933e768-42"   <= qrouter-bf360d81-79fb-4636-8241-0a843f228fc8
            tag: 2
            Interface "tapf933e768-42"
        Port "tap40dd712c-e4"   <= qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7
            tag: 2
            Interface "tap40dd712c-e4"
        Port "tap343b0090-24"   <= qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b
            tag: 1
            Interface "tap343b0090-24"
    Bridge br-tun
        Port "gre-2"
            Interface "gre-2"
                type: gre
                options: {in_key=flow, local_ip="192.168.1.127", out_key=flow, remote_ip="192.168.1.137"}
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port br-tun
            Interface br-tun
                type: internal
    ovs_version: "2.0.1"

Monday, April 14, 2014

RDO Havana Neutron Namespaces Troubleshooting for OVS&VLAN (GRE) Config


The  OpenStack Networking components are deployed on the Controller, Compute, and Network nodes in the following configuration:

In case of Two Node Development Cluster :-

Controller node: hosts the Neutron server service, which provides the networking API and communicates with and tracks the agents.
        DHCP agent: spawns and controls dnsmasq processes to provide leases to instances. This agent also spawns neutron-ns-metadata-proxy processes as part of the metadata system.
        Metadata agent: Provides a metadata proxy to the nova-api-metadata service. The neutron-ns-metadata-proxy processes direct the traffic that they receive in their namespaces to the proxy.
        OVS plugin agent: Controls OVS network bridges and routes between them via patch, tunnel, or tap without requiring an external OpenFlow controller.
        L3 agent: performs L3 forwarding and NAT.

In case of Three Nodes or more (several Compute Nodes) :-

A separate box hosts the Neutron Server and all the services mentioned above.

Compute node: has an OVS plugin agent and openstack-nova-compute service.

Namespaces (View  Identifying and Troubleshooting Neutron Namespaces )

For each network you create, the Network node (or Controller node, if combined) will have a unique network namespace (netns) created by the DHCP and Metadata agents. The netns hosts an interface and IP addresses for dnsmasq and the neutron-ns-metadata-proxy. You can view the namespaces with the `ip netns list` command, and interact with them via `ip netns exec <namespace> <command>`.

Every l2-agent/private network has an associated dhcp namespace, and
every l3-agent/router has an associated router namespace.

A network namespace name starts with qdhcp- followed by the ID of the network.
A router namespace name starts with qrouter- followed by the ID of the router.
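
As a minimal sketch (assuming admin credentials are sourced, and using the network name "int" and router name "router1" from this setup), the namespace names can be derived straight from the Neutron IDs:

# look up the IDs, then address the namespaces by convention
NET_ID=$(neutron net-list | awk '/ int /{print $2}')
ROUTER_ID=$(neutron router-list | awk '/ router1 /{print $2}')
ip netns exec qdhcp-$NET_ID ip a
ip netns exec qrouter-$ROUTER_ID ip r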






Source the admin credentials and get the network list:


[root@dfw02 ~(keystone_admin)]$ neutron net-list
+--------------------------------------+------+-----------------------------------------------------+
| id                                   | name | subnets                                             |
+--------------------------------------+------+-----------------------------------------------------+
| 1eea88bb-4952-4aa4-9148-18b61c22d5b7 | int  | fa930cea-3d51-4cbe-a305-579f12aa53c0 10.0.0.0/24    |
| 426bb226-0ab9-440d-ba14-05634a17fb2b | int1 | 9e0d457b-c4c4-45cf-84e2-4ac7550f3b06 40.0.0.0/24    |
| 780ce2f3-2e6e-4881-bbac-857813f9a8e0 | ext  | f30e5a16-a055-4388-a6ea-91ee142efc3d 192.168.1.0/24 |
+--------------------------------------+------+-----------------------------------------------------+

Using the network IDs above, run the following `ip netns list | grep` commands to get the tenants' qdhcp-* namespace names:

[root@dfw02 ~(keystone_admin)]$ ip netns list | grep 1eea88bb-4952-4aa4-9148-18b61c22d5b7
qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7

[root@dfw02 ~(keystone_admin)]$ ip netns list | grep 426bb226-0ab9-440d-ba14-05634a17fb2b
qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b


Check the tenant's namespace by getting its interface IP and pinging this IP inside the namespace:

[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 35  bytes 4416 (4.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 35  bytes 4416 (4.3 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ns-343b0090-24: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 40.0.0.3  netmask 255.255.255.0  broadcast 40.0.0.255
        inet6 fe80::f816:3eff:fe01:8b55  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:01:8b:55  txqueuelen 1000  (Ethernet)
        RX packets 3251  bytes 386284 (377.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1774  bytes 344082 (336.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b ping -c 3 40.0.0.3

PING 40.0.0.3 (40.0.0.3) 56(84) bytes of data.
64 bytes from 40.0.0.3: icmp_seq=1 ttl=64 time=0.047 ms
64 bytes from 40.0.0.3: icmp_seq=2 ttl=64 time=0.041 ms
64 bytes from 40.0.0.3: icmp_seq=3 ttl=64 time=0.032 ms

--- 40.0.0.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.032/0.040/0.047/0.006 ms



[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b netstat -4 -anpt 

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 40.0.0.3:53             0.0.0.0:*           LISTEN      4370/dnsmasq       
 

[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7 netstat -4 -anpt

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 10.0.0.3:53             0.0.0.0:*               LISTEN      4368/dnsmasq


Now verify that a dedicated dnsmasq process serves each tenant's namespace:

[root@dfw02 ~(keystone_admin)]$  ps -aux | grep dhcp

neutron   2431  0.3  0.3 263948 30700 ?        Ss   08:42   0:28 /usr/bin/python /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log

nobody    4368  0.0  0.0  15532   832 ?        S    08:43   0:00 dnsmasq --no-hosts --no-resolv --strict-order --bind-interfaces --interface=ns-40dd712c-e4 --except-interface=lo --pid-file=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/host --dhcp-optsfile=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/opts --leasefile-ro --dhcp-range=set:tag0,10.0.0.0,static,120s --dhcp-lease-max=256 --conf-file=/etc/neutron/dnsmasq.conf --domain=openstacklocal

nobody    4370  0.0  0.0  15532   872 ?        S    08:43   0:00 dnsmasq --no-hosts --no-resolv --strict-order --bind-interfaces --interface=ns-343b0090-24 --except-interface=lo --pid-file=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/host --dhcp-optsfile=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/opts --leasefile-ro --dhcp-range=set:tag0,40.0.0.0,static,120s --dhcp-lease-max=256 --conf-file=/etc/neutron/dnsmasq.conf --domain=openstacklocal


[root@dfw02 ~(keystone_admin)]$ ps -f --pid 4370 | fold -s -w 82
UID        PID  PPID  C STIME TTY          TIME CMD
nobody    4370     1  0 08:43 ?        00:00:00 dnsmasq --no-hosts --no-resolv
--strict-order --bind-interfaces --interface=ns-343b0090-24 --except-interface=lo
--pid-file=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/pid
--dhcp-hostsfile=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/host
--dhcp-optsfile=/var/lib/neutron/dhcp/426bb226-0ab9-440d-ba14-05634a17fb2b/opts
--leasefile-ro --dhcp-range=set:tag0,40.0.0.0,static,120s --dhcp-lease-max=256
--conf-file=/etc/neutron/dnsmasq.conf --domain=openstacklocal

[root@dfw02 ~(keystone_admin)]$ ps -f --pid 4368 | fold -s -w 82
UID        PID  PPID  C STIME TTY          TIME CMD
nobody    4368     1  0 08:43 ?        00:00:00 dnsmasq --no-hosts --no-resolv
--strict-order --bind-interfaces --interface=ns-40dd712c-e4 --except-interface=lo
--pid-file=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/pid
--dhcp-hostsfile=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/host
--dhcp-optsfile=/var/lib/neutron/dhcp/1eea88bb-4952-4aa4-9148-18b61c22d5b7/opts
--leasefile-ro --dhcp-range=set:tag0,10.0.0.0,static,120s --dhcp-lease-max=256
--conf-file=/etc/neutron/dnsmasq.conf --domain=openstacklocal
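
The --pid-file paths in the command lines above give a quick cross-check that each dnsmasq instance belongs to the expected network; a hedged sketch for one of them:

# PID recorded by neutron-dhcp-agent for this network (should be 4370 here)
NET_ID=426bb226-0ab9-440d-ba14-05634a17fb2b
cat /var/lib/neutron/dhcp/$NET_ID/pid
ps -f --pid $(cat /var/lib/neutron/dhcp/$NET_ID/pid)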


List interfaces inside dhcp namespace

[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever


2: ns-343b0090-24: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:01:8b:55 brd ff:ff:ff:ff:ff:ff
    inet 40.0.0.3/24 brd 40.0.0.255 scope global ns-343b0090-24
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe01:8b55/64 scope link
       valid_lft forever preferred_lft forever


(A)  From the instance to a router

Check routing inside dhcp namespace
 
[root@dfw02 ~(keystone_admin)]$ ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b  ip r
 

default via 40.0.0.1 dev ns-343b0090-24
40.0.0.0/24 dev ns-343b0090-24  proto kernel  scope link  src 40.0.0.3

Check routing inside the router namespace

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ip r

default via 192.168.1.1 dev qg-9c090153-08
40.0.0.0/24 dev qr-e031db6b-d0  proto kernel  scope link  src 40.0.0.1
192.168.1.0/24 dev qg-9c090153-08  proto kernel  scope link  src 192.168.1.114 
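
As a quick sketch, the two namespaces should see each other over the tenant subnet (addresses taken from the outputs above; both ports carry VLAN tag 1 on br-int):

# dhcp port -> router gateway, then router -> dnsmasq port
ip netns exec qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b ping -c 2 40.0.0.1
ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ping -c 2 40.0.0.3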

Get the router list, then use a similar grep with the router ID to obtain the router namespaces:

[root@dfw02 ~(keystone_admin)]$ neutron router-list
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| id                                   | name    | external_gateway_info                                                       |
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| 86b3008c-297f-4301-9bdc-766b839785f1 | router2 | {"network_id": "780ce2f3-2e6e-4881-bbac-857813f9a8e0", "enable_snat": true} |
| bf360d81-79fb-4636-8241-0a843f228fc8 | router1 | {"network_id": "780ce2f3-2e6e-4881-bbac-857813f9a8e0", "enable_snat": true} |
+--------------------------------------+---------+-----------------------------------------------------------------------------+

Now get the qrouter-* namespaces via the `ip netns list` command :-

[root@dfw02 ~(keystone_admin)]$ ip netns list | grep 86b3008c-297f-4301-9bdc-766b839785f1
qrouter-86b3008c-297f-4301-9bdc-766b839785f1


[root@dfw02 ~(keystone_admin)]$ ip netns list | grep  bf360d81-79fb-4636-8241-0a843f228fc8
qrouter-bf360d81-79fb-4636-8241-0a843f228fc8



Now verify L3 forwarding & NAT via `iptables -L -t nat` inside the router namespace, and check that port 80 traffic for 169.254.169.254 is redirected to the RDO Havana Controller's host (which, in my configuration, runs the Neutron Server service along with all agents) at metadata port 8700:


[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 iptables -L -t nat

Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-PREROUTING  all  --  anywhere             anywhere           

Chain INPUT (policy ACCEPT)
target     prot opt source               destination        

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-OUTPUT  all  --  anywhere             anywhere           

Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-POSTROUTING  all  --  anywhere             anywhere           
neutron-postrouting-bottom  all  --  anywhere             anywhere           

Chain neutron-l3-agent-OUTPUT (1 references)
target     prot opt source               destination        
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.2
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.6
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.5

Chain neutron-l3-agent-POSTROUTING (1 references)
target     prot opt source               destination        
ACCEPT     all  --  anywhere             anywhere             ! ctstate DNAT

Chain neutron-l3-agent-PREROUTING (1 references)
target     prot opt source               destination        
REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports 8700
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.2
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.6
DNAT       all  --  anywhere             dfw02.localdomain    to:40.0.0.5

Chain neutron-l3-agent-float-snat (1 references)
target     prot opt source               destination        
SNAT       all  --  40.0.0.2             anywhere             to:192.168.1.107
SNAT       all  --  40.0.0.6             anywhere             to:192.168.1.104
SNAT       all  --  40.0.0.5             anywhere             to:192.168.1.110

Chain neutron-l3-agent-snat (1 references)
target     prot opt source               destination        
neutron-l3-agent-float-snat  all  --  anywhere             anywhere           
SNAT       all  --  40.0.0.0/24          anywhere             to:192.168.1.114

Chain neutron-postrouting-bottom (1 references)
target     prot opt source               destination        
neutron-l3-agent-snat  all  --  anywhere             anywhere           



[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8  iptables -L -t nat

Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-PREROUTING  all  --  anywhere             anywhere           

Chain INPUT (policy ACCEPT)
target     prot opt source               destination        

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-OUTPUT  all  --  anywhere             anywhere           

Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination        
neutron-l3-agent-POSTROUTING  all  --  anywhere             anywhere           
neutron-postrouting-bottom  all  --  anywhere             anywhere           

Chain neutron-l3-agent-OUTPUT (1 references)
target     prot opt source               destination        
DNAT       all  --  anywhere             dfw02.localdomain    to:10.0.0.2

Chain neutron-l3-agent-POSTROUTING (1 references)
target     prot opt source               destination        
ACCEPT     all  --  anywhere             anywhere             ! ctstate DNAT

Chain neutron-l3-agent-PREROUTING (1 references)
target     prot opt source               destination        
REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports 8700
DNAT       all  --  anywhere             dfw02.localdomain    to:10.0.0.2

Chain neutron-l3-agent-float-snat (1 references)
target     prot opt source               destination        
SNAT       all  --  10.0.0.2             anywhere             to:192.168.1.103

Chain neutron-l3-agent-snat (1 references)
target     prot opt source               destination        
neutron-l3-agent-float-snat  all  --  anywhere             anywhere           
SNAT       all  --  10.0.0.0/24          anywhere             to:192.168.1.100

Chain neutron-postrouting-bottom (1 references)
target     prot opt source               destination        
neutron-l3-agent-snat  all  --  anywhere             anywhere


(B) (through a NAT rule in the router namespace)
 
Check the NAT table


[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 iptables -t nat -S
-P PREROUTING ACCEPT
-P INPUT ACCEPT
-P OUTPUT ACCEPT
-P POSTROUTING ACCEPT
-N neutron-l3-agent-OUTPUT
-N neutron-l3-agent-POSTROUTING
-N neutron-l3-agent-PREROUTING
-N neutron-l3-agent-float-snat
-N neutron-l3-agent-snat
-N neutron-postrouting-bottom
-A PREROUTING -j neutron-l3-agent-PREROUTING
-A OUTPUT -j neutron-l3-agent-OUTPUT
-A POSTROUTING -j neutron-l3-agent-POSTROUTING
-A POSTROUTING -j neutron-postrouting-bottom
-A neutron-l3-agent-OUTPUT -d 192.168.1.112/32 -j DNAT --to-destination 40.0.0.2
-A neutron-l3-agent-OUTPUT -d 192.168.1.113/32 -j DNAT --to-destination 40.0.0.4
-A neutron-l3-agent-OUTPUT -d 192.168.1.104/32 -j DNAT --to-destination 40.0.0.6
-A neutron-l3-agent-OUTPUT -d 192.168.1.110/32 -j DNAT --to-destination 40.0.0.5
-A neutron-l3-agent-POSTROUTING ! -i qg-9c090153-08 ! -o qg-9c090153-08 -m conntrack ! --ctstate DNAT -j ACCEPT
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8700
-A neutron-l3-agent-PREROUTING -d 192.168.1.112/32 -j DNAT --to-destination 40.0.0.2
-A neutron-l3-agent-PREROUTING -d 192.168.1.113/32 -j DNAT --to-destination 40.0.0.4
-A neutron-l3-agent-PREROUTING -d 192.168.1.104/32 -j DNAT --to-destination 40.0.0.6
-A neutron-l3-agent-PREROUTING -d 192.168.1.110/32 -j DNAT --to-destination 40.0.0.5
-A neutron-l3-agent-float-snat -s 40.0.0.2/32 -j SNAT --to-source 192.168.1.112
-A neutron-l3-agent-float-snat -s 40.0.0.4/32 -j SNAT --to-source 192.168.1.113
-A neutron-l3-agent-float-snat -s 40.0.0.6/32 -j SNAT --to-source 192.168.1.104
-A neutron-l3-agent-float-snat -s 40.0.0.5/32 -j SNAT --to-source 192.168.1.110
-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-A neutron-l3-agent-snat -s 40.0.0.0/24 -j SNAT --to-source 192.168.1.114
-A neutron-postrouting-bottom -j neutron-l3-agent-snat

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8 iptables -t nat -S
-P PREROUTING ACCEPT
-P INPUT ACCEPT
-P OUTPUT ACCEPT
-P POSTROUTING ACCEPT
-N neutron-l3-agent-OUTPUT
-N neutron-l3-agent-POSTROUTING
-N neutron-l3-agent-PREROUTING
-N neutron-l3-agent-float-snat
-N neutron-l3-agent-snat
-N neutron-postrouting-bottom
-A PREROUTING -j neutron-l3-agent-PREROUTING
-A OUTPUT -j neutron-l3-agent-OUTPUT
-A POSTROUTING -j neutron-l3-agent-POSTROUTING
-A POSTROUTING -j neutron-postrouting-bottom
-A neutron-l3-agent-OUTPUT -d 192.168.1.103/32 -j DNAT --to-destination 10.0.0.2
-A neutron-l3-agent-POSTROUTING ! -i qg-54e34740-87 ! -o qg-54e34740-87 -m conntrack ! --ctstate DNAT -j ACCEPT
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8700
-A neutron-l3-agent-PREROUTING -d 192.168.1.103/32 -j DNAT --to-destination 10.0.0.2
-A neutron-l3-agent-float-snat -s 10.0.0.2/32 -j SNAT --to-source 192.168.1.103
-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-A neutron-l3-agent-snat -s 10.0.0.0/24 -j SNAT --to-source 192.168.1.100
-A neutron-postrouting-bottom -j neutron-l3-agent-snat
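
Each DNAT/SNAT pair above corresponds to a floating IP association, so the rules can be cross-checked against Neutron's own records; a sketch using one floating IP from the rules:

# should show fixed IP 10.0.0.2 behind floating IP 192.168.1.103
neutron floatingip-list | grep 192.168.1.103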


Ping to verify network connections

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ping 8.8.8.8
PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
64 bytes from 8.8.8.8: icmp_seq=1 ttl=47 time=42.6 ms
64 bytes from 8.8.8.8: icmp_seq=2 ttl=47 time=40.8 ms
64 bytes from 8.8.8.8: icmp_seq=3 ttl=47 time=41.6 ms
64 bytes from 8.8.8.8: icmp_seq=4 ttl=47 time=41.0 ms
          


Verify the service listening on port 8700 inside the router namespaces; the output looks like this:

(C) (to an instance of the neutron-ns-metadata-proxy)

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1  netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4946/python        

Check process with pid 4946

[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 4946

root      4946     1  0 08:58 ?        00:00:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/86b3008c-297f-4301-9bdc-766b839785f1.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=86b3008c-297f-4301-9bdc-766b839785f1 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-86b3008c-297f-4301-9bdc-766b839785f1.log --log-dir=/var/log/neutron
root     10396 11489  0 16:33 pts/3    00:00:00 grep --color=auto 4946


[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8  netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4746/python

 Check process with pid 4746
       
[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 4746


root      4746     1  0 08:58 ?        00:00:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/bf360d81-79fb-4636-8241-0a843f228fc8.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=bf360d81-79fb-4636-8241-0a843f228fc8 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-bf360d81-79fb-4636-8241-0a843f228fc8.log --log-dir=/var/log/neutron
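
Both proxies forward requests over the Unix socket given by --metadata_proxy_socket, where neutron-metadata-agent listens; a quick sketch to confirm the socket is in place:

# socket path taken from the proxy command lines above
ls -l /var/lib/neutron/metadata_proxy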

Now run the following commands inside the router namespaces to check the status of the neutron metadata port:

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1  netstat -na

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN    
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8  netstat -na

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN    
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

OR this way

[root@dfw02 ~(keystone_admin)]$  ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 netstat -anpt

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4667/python        

[root@dfw02 ~(keystone_admin)]$  ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8 netstat -anpt

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4459/python        

[root@dfw02 ~(keystone_admin)]$ ps -aux | grep 4667

boris     3638  0.0  0.0 466780  7652 ?        Sl   08:43   0:00 /usr/bin/ibus-daemon --replace --xim --panel disable

root      4667  0.0  0.3 263788 25664 ?        S    08:43   0:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/86b3008c-297f-4301-9bdc-766b839785f1.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=86b3008c-297f-4301-9bdc-766b839785f1 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-86b3008c-297f-4301-9bdc-766b839785f1.log --log-dir=/var/log/neutron
root     21538  0.0  0.0 112676   916 pts/0    R+   12:26   0:00 grep --color=auto 4667

[root@dfw02 ~(keystone_admin)]$ ps -aux | grep 4459
 

root      4459  0.0  0.2 260052 24080 ?        S    08:43   0:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/bf360d81-79fb-4636-8241-0a843f228fc8.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=bf360d81-79fb-4636-8241-0a843f228fc8 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-bf360d81-79fb-4636-8241-0a843f228fc8.log --log-dir=/var/log/neutron
root     21606  0.0  0.0 112676   920 pts/0    R+   12:27   0:00 grep --color=auto 4459

Outside the router namespaces it looks like this:

(D) (to the actual Nova metadata service)

Run on the Controller hosting the Neutron Server service :-

[root@dfw02 ~(keystone_admin)]$ netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      2746/python

 Check process with pid  2746

[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 2746

nova      2746     1  0 08:57 ?        00:02:31 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2830  2746  0 08:57 ?        00:00:00 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2851  2746  0 08:57 ?        00:00:10 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2858  2746  0 08:57 ?        00:00:02 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
root      9976 11489  0 16:31 pts/3    00:00:00 grep --color=auto 2746

So we have actually verified the statement from Direct access to Nova metadata: in an environment running Neutron, a request from your instance must traverse a number of steps (a one-line check follows the list):

    1. From the instance to a router,(A)
    2. Through a NAT rule in the router namespace,  (B)
    3. To an instance of the neutron-ns-metadata-proxy, (C)
    4. To the actual Nova metadata service (D)
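
A single request from inside any guest exercises all four steps at once; a minimal check (run inside the instance):

# should print the instance id, e.g. i-000000a4
curl http://169.254.169.254/latest/meta-data/instance-id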

Troubleshooting inside the router namespaces :-

[root@dfw02 ~(keystone_admin)]$ neutron router-list
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| id                                   | name    | external_gateway_info                                                       |
+--------------------------------------+---------+-----------------------------------------------------------------------------+
| 86b3008c-297f-4301-9bdc-766b839785f1 | router2 | {"network_id": "780ce2f3-2e6e-4881-bbac-857813f9a8e0", "enable_snat": true} |
| bf360d81-79fb-4636-8241-0a843f228fc8 | router1 | {"network_id": "780ce2f3-2e6e-4881-bbac-857813f9a8e0", "enable_snat": true} |
+--------------------------------------+---------+-----------------------------------------------------------------------------+


[root@dfw02 ~(keystone_admin)]$ ip netns list
qrouter-86b3008c-297f-4301-9bdc-766b839785f1
qrouter-bf360d81-79fb-4636-8241-0a843f228fc8
qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7
qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b

Then select the corresponding namespace qrouter-86b3008c-297f-4301-9bdc-766b839785f1 and run the following commands :-

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ifconfig

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 34  bytes 4008 (3.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 34  bytes 4008 (3.9 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-9c090153-08: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.114  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::f816:3eff:feb2:24bc  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:b2:24:bc  txqueuelen 1000  (Ethernet)
        RX packets 261453  bytes 329837642 (314.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 104979  bytes 8514797 (8.1 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-e031db6b-d0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 40.0.0.1  netmask 255.255.255.0  broadcast 40.0.0.255
        inet6 fe80::f816:3eff:fe83:9090  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:83:90:90  txqueuelen 1000  (Ethernet)
        RX packets 106216  bytes 8579334 (8.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 236556  bytes 327026227 (311.8 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ping -c 3 40.0.0.1
PING 40.0.0.1 (40.0.0.1) 56(84) bytes of data.
64 bytes from 40.0.0.1: icmp_seq=1 ttl=64 time=0.044 ms
64 bytes from 40.0.0.1: icmp_seq=2 ttl=64 time=0.036 ms
64 bytes from 40.0.0.1: icmp_seq=3 ttl=64 time=0.048 ms

--- 40.0.0.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.036/0.042/0.048/0.009 ms


[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1 ping -c 3 192.168.1.114
PING 192.168.1.114 (192.168.1.114) 56(84) bytes of data.
64 bytes from 192.168.1.114: icmp_seq=1 ttl=64 time=0.036 ms
64 bytes from 192.168.1.114: icmp_seq=2 ttl=64 time=0.048 ms
64 bytes from 192.168.1.114: icmp_seq=3 ttl=64 time=0.050 ms

--- 192.168.1.114 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.036/0.044/0.050/0.009 ms

References

1. OpenStack Networking concepts

Saturday, April 05, 2014

HowTo access metadata from RDO Havana Instance on Fedora 20 & OpenStack Networking concepts

OpenStack Networking concepts

The  OpenStack Networking components are deployed on the Controller, Compute, and Network nodes in the following configuration:

Controller node: may host the Neutron server service, which provides the networking API and communicates with and tracks the agents.
        DHCP agent: spawns and controls dnsmasq processes to provide leases to instances. This agent also spawns neutron-ns-metadata-proxy processes as part of the metadata system.
        Metadata agent: Provides a metadata proxy to the nova-api-metadata service. The neutron-ns-metadata-proxy processes direct the traffic that they receive in their namespaces to the proxy.
        OVS plugin agent: Controls OVS network bridges and routes between them via patch, tunnel, or tap without requiring an external OpenFlow controller.
        L3 agent: performs L3 forwarding and NAT.

Otherwise, a separate box hosts the Neutron Server and all the services mentioned above.

Compute node: has an OVS plugin agent and openstack-nova-compute service.


Namespaces (View also Identifying and Troubleshooting Neutron Namespaces )

For each network you create, the Network node (or Controller node, if combined) will have a unique network namespace (netns) created by the DHCP and Metadata agents. The netns hosts an interface and IP addresses for dnsmasq and the neutron-ns-metadata-proxy. You can view the namespaces with the `ip netns list` command, and interact with them via `ip netns exec <namespace> <command>`.

As mentioned in Direct_access_to_Nova_metadata,
in an environment running Neutron, a request from your instance must traverse a number of steps:

    1. From the instance to a router,
    2. Through a NAT rule in the router namespace,
    3. To an instance of the neutron-ns-metadata-proxy,
    4. To the actual Nova metadata service


   Reproducing Direct_access_to_Nova_metadata, I was able to get the list of available EC2 metadata, but not their values. However, my major concern was getting the values of the metadata obtained in the post Direct_access_to_Nova_metadata, and also those at the /openstack location. The latter seem to me no less important than those present in the EC2 list; not all of the /openstack metadata are provided by the EC2 list.


The commands run below are supposed to verify that the Nova & Neutron setup was performed successfully; otherwise, one of the four steps 1,2,3,4 will fail and force you to analyse the corresponding log files (view References). It doesn't matter whether you set up the RDO Havana cloud environment manually or via packstack.
  
Run on Controller Node :-

[root@dallas1 ~(keystone_admin)]$ ip netns list
qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d
qdhcp-166d9651-d299-47df-a5a1-b368e87b612f

Check the routing in the cloud controller's router namespace; it should show that port 80 traffic for 169.254.169.254 is redirected to the host at port 8700:


[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d iptables -L -t nat | grep 169

REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports  8700


Check routing table inside the router namespace:

[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d ip r

 default via 192.168.1.1 dev qg-8fbb6202-3d
10.0.0.0/24 dev qr-2dd1ba70-34  proto kernel  scope link  src 10.0.0.1
192.168.1.0/24 dev qg-8fbb6202-3d  proto kernel  scope link  src 192.168.1.100

[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d netstat -na
Active Internet connections (servers and established)

Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN   

Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

[root@dallas1 ~(keystone_admin)]$ ip netns exec qdhcp-166d9651-d299-47df-a5a1-b368e87b612f netstat -na

Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 10.0.0.3:53             0.0.0.0:*               LISTEN    
tcp6       0      0 fe80::f816:3eff:feef:53 :::*                    LISTEN    
udp        0      0 10.0.0.3:53             0.0.0.0:*                         
udp        0      0 0.0.0.0:67              0.0.0.0:*                         
udp6       0      0 fe80::f816:3eff:feef:53 :::*                              
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

[root@dallas1 ~(keystone_admin)]$ iptables-save | grep 8700

-A INPUT -p tcp -m multiport --dports 8700 -m comment --comment "001 metadata incoming" -j ACCEPT


[root@dallas1 ~(keystone_admin)]$ netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      2830/python
  

[root@dallas1 ~(keystone_admin)]$ ps -ef | grep 2830
 

nova      2830     1  0 09:41 ?        00:00:57 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2856  2830  0 09:41 ?        00:00:00 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2874  2830  0 09:41 ?        00:00:09 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2875  2830  0 09:41 ?        00:00:01 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log



On another cluster


[root@dfw02 ~(keystone_admin)]$ ip netns list
qrouter-86b3008c-297f-4301-9bdc-766b839785f1
qrouter-bf360d81-79fb-4636-8241-0a843f228fc8

qdhcp-426bb226-0ab9-440d-ba14-05634a17fb2b
qdhcp-1eea88bb-4952-4aa4-9148-18b61c22d5b7

[root@dfw02 ~(keystone_admin)]$ netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      2746/python        
[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 2746
nova      2746     1  0 08:57 ?        00:02:31 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2830  2746  0 08:57 ?        00:00:00 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2851  2746  0 08:57 ?        00:00:10 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2858  2746  0 08:57 ?        00:00:02 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
root      9976 11489  0 16:31 pts/3    00:00:00 grep --color=auto 2746



Inside the namespaces the output looks like this:

[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-86b3008c-297f-4301-9bdc-766b839785f1  netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4946/python        
[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 4946
root      4946     1  0 08:58 ?        00:00:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/86b3008c-297f-4301-9bdc-766b839785f1.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=86b3008c-297f-4301-9bdc-766b839785f1 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-86b3008c-297f-4301-9bdc-766b839785f1.log --log-dir=/var/log/neutron
root     10396 11489  0 16:33 pts/3    00:00:00 grep --color=auto 4946


[root@dfw02 ~(keystone_admin)]$ ip netns exec qrouter-bf360d81-79fb-4636-8241-0a843f228fc8  netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      4746/python        
[root@dfw02 ~(keystone_admin)]$ ps -ef | grep 4746
root      4746     1  0 08:58 ?        00:00:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/bf360d81-79fb-4636-8241-0a843f228fc8.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=bf360d81-79fb-4636-8241-0a843f228fc8 --state_path=/var/lib/neutron --metadata_port=8700 --verbose --log-file=neutron-ns-metadata-proxy-bf360d81-79fb-4636-8241-0a843f228fc8.log --log-dir=/var/log/neutron


 1. At this point you should be able, inside any running Havana instance, to launch a browser ("links" at least, if there is no lightweight X environment) against:

      http://169.254.169.254/openstack/latest (not EC2)

The response will be: meta_data.json, password, vendor_data.json
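
The same listing can also be fetched non-interactively from inside the instance; a short sketch:

# lists meta_data.json, password and vendor_data.json
curl http://169.254.169.254/openstack/latest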



  What is curl: http://curl.haxx.se/docs/faq.html#What_is_cURL

   Now you should be able to run on the F20 instance:


[root@vf20rs0404 ~] # curl http://169.254.169.254/openstack/latest/meta_data.json | tee meta_data.json

 % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                                 Dload  Upload   Total   Spent    Left  Speed
    100  1286  100  1286    0     0   1109      0  0:00:01  0:00:01 --:--:--  1127
                . . . . . . . .
                "uuid": "10142280-44a2-4830-acce-f12f3849cb32",
                "availability_zone": "nova",
                "hostname": "vf20rs0404.novalocal",
                "launch_index": 0,
                "public_keys": {"key2": "ssh-rsa . . . . .  Generated by Nova\n"},
                "name": "VF20RS0404"

On another instance (in my case Ubuntu 14.04 )

 root@ubuntutrs0407:~#curl http://169.254.169.254/openstack/latest/meta_data.json | tee meta_data.json

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                             Dload  Upload   Total   Spent    Left  Speed
 100  1292  100  1292    0     0    444      0  0:00:02  0:00:02 --:--:--   446
            {"random_seed": "...",
            "uuid": "8c79e60c-4f1d-44e5-8446-b42b4d94c4fc",
            "availability_zone": "nova",
            "hostname": "ubuntutrs0407.novalocal",
            "launch_index": 0,
            "public_keys": {"key2": "ssh-rsa .... Generated by Nova\n"},
            "name": "UbuntuTRS0407"}

Running VMs on Compute node:-

[root@dallas1 ~(keystone_boris)]$ nova list
+--------------------------------------+---------------+-----------+------------+-------------+-----------------------------+
| ID                                   | Name          | Status    | Task State | Power State | Networks                    |
+--------------------------------------+---------------+-----------+------------+-------------+-----------------------------+
| d0f947b1-ff6a-4ff0-b858-b63a3d07cca3 | UbuntuTRS0405 | SUSPENDED | None       | Shutdown    | int=10.0.0.7, 192.168.1.106 |
| 8c79e60c-4f1d-44e5-8446-b42b4d94c4fc | UbuntuTRS0407 | ACTIVE    | None       | Running     | int=10.0.0.6, 192.168.1.107 |
| 8775924c-dbbd-4fbb-afb8-7e38d9ac7615 | VF20RS037     | SUSPENDED | None       | Shutdown    | int=10.0.0.2, 192.168.1.115 |
| d22a2376-33da-4a0e-a066-d334bd2e511d | VF20RS0402    | SUSPENDED | None       | Shutdown    | int=10.0.0.4, 192.168.1.103 |
| 10142280-44a2-4830-acce-f12f3849cb32 | VF20RS0404    | ACTIVE    | None       | Running     | int=10.0.0.5, 192.168.1.105 |
+--------------------------------------+---------------+-----------+------------+-------------+-----------------------------+

Launching a browser to http://169.254.169.254/openstack/latest/meta_data.json on another Two Node Neutron GRE+OVS F20 Cluster; the output is sent directly to the browser.


  




2. I have provided some information about the OpenStack metadata API, which is available at /openstack. If you are interested in the EC2 metadata API instead, the browser should be launched at http://169.254.169.254/latest/meta-data/



   This allows you to get the value of any of the displayed parameters.

    For instance :-
    



   OR via CLI
   
ubuntu@ubuntutrs0407:~$ curl  http://169.254.169.254/latest/meta-data/instance-id
i-000000a4

ubuntu@ubuntutrs0407:~$ curl  http://169.254.169.254/latest/meta-data/public-hostname
ubuntutrs0407.novalocal

ubuntu@ubuntutrs0407:~$ curl  http://169.254.169.254/latest/meta-data/public-ipv4
192.168.1.107

To verify the instance-id, launch virt-manager connected to the Compute Node, which shows the same value "000000a4".

  Another option in text mode is the "links" browser:

   $ ssh -l ubuntu -i key2.pem 192.168.1.109
 
   Inside Ubuntu 14.04 instance  :-

   # apt-get -y install links
   # links

    Press ESC to get to menu:-
   

   
  

References

Monday, March 31, 2014

Attempt to reproduce "Direct access to Nova metadata per Lars Kellogg-Stedman"

Quoting  http://blog.oddbit.com/2014/01/14/direct-access-to-nova-metadata/

In an environment running Neutron, a request from your instance must traverse a number of steps:

    From the instance to a router,
    Through a NAT rule in the router namespace,
    To an instance of the neutron-ns-metadata-proxy,
    To the actual Nova metadata service

When there are problems accessing the metadata, it can be helpful to verify that the metadata service itself is configured correctly and returning meaningful information.

End quoting, and start reproducing on the Controller of a Two Node Neutron GRE+OVS+Gluster Fedora 20 Cluster:


[root@dallas1 ~(keystone_admin)]$ ip netns list
qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d
qdhcp-166d9651-d299-47df-a5a1-b368e87b612f

Check the routing in the cloud controller's router namespace; it should show that port 80 traffic for 169.254.169.254 is redirected to the host at port 8700:


[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d iptables -L -t nat | grep 169
REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports  8700


Check routing table inside the router namespace:

[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d ip r
default via 192.168.1.1 dev qg-8fbb6202-3d
10.0.0.0/24 dev qr-2dd1ba70-34  proto kernel  scope link  src 10.0.0.1
192.168.1.0/24 dev qg-8fbb6202-3d  proto kernel  scope link  src 192.168.1.100

[root@dallas1 ~(keystone_admin)]$ ip netns exec qrouter-cb80b040-f13f-4a67-a39e-353b1c873a0d netstat -na
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN    
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

[root@dallas1 ~(keystone_admin)]$ ip netns exec qdhcp-166d9651-d299-47df-a5a1-b368e87b612f netstat -na
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State     
tcp        0      0 10.0.0.3:53             0.0.0.0:*               LISTEN    
tcp6       0      0 fe80::f816:3eff:feef:53 :::*                    LISTEN    
udp        0      0 10.0.0.3:53             0.0.0.0:*                         
udp        0      0 0.0.0.0:67              0.0.0.0:*                         
udp6       0      0 fe80::f816:3eff:feef:53 :::*                              
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

[root@dallas1 ~(keystone_admin)]$ iptables-save | grep 8700
-A INPUT -p tcp -m multiport --dports 8700 -m comment --comment "001 metadata incoming" -j ACCEPT


[root@dallas1 ~(keystone_admin)]$ netstat -lntp | grep 8700
tcp        0      0 0.0.0.0:8700            0.0.0.0:*               LISTEN      2830/python        

[root@dallas1 ~(keystone_admin)]$ ps -ef | grep 2830
nova      2830     1  0 09:41 ?        00:00:57 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2856  2830  0 09:41 ?        00:00:00 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2874  2830  0 09:41 ?        00:00:09 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log
nova      2875  2830  0 09:41 ?        00:00:01 /usr/bin/python /usr/bin/nova-api --logfile /var/log/nova/api.log

The checks are done; then follow http://blog.oddbit.com/2014/01/14/direct-access-to-nova-metadata/

[root@dallas1 ~]# grep shared_secret /etc/nova/nova.conf
neutron_metadata_proxy_shared_secret = fedora

[root@dallas1 ~]# . keystonerc_boris

[root@dallas1 ~(keystone_boris)]$ nova list
+--------------------------------------+--------------+-----------+------------+-------------+-----------------------------+
| ID                                   | Name         | Status    | Task State | Power State | Networks                    |
+--------------------------------------+--------------+-----------+------------+-------------+-----------------------------+
| 8543e339-724c-438e-80be-8259906ccf6d | UbuntuTRS005 | ACTIVE    | None       | Running     | int=10.0.0.6, 192.168.1.116 |
| 8bb32603-c27b-4665-a025-859f1a5bc04e | UbuntuTRS031 | SUSPENDED | None       | Shutdown    | int=10.0.0.5, 192.168.1.113 |
| 177ab5b8-c86b-44d8-aa50-b4b09cc46274 | VF20RS007    | SUSPENDED | None       | Shutdown    | int=10.0.0.4, 192.168.1.112 |
| a34ece35-afd2-466e-b591-93b269c8e41a | VF20RS017    | ACTIVE    | None       | Running     | int=10.0.0.7, 192.168.1.114 |
| 8775924c-dbbd-4fbb-afb8-7e38d9ac7615 | VF20RS037    | ACTIVE    | None       | Running     | int=10.0.0.2, 192.168.1.115 |
+--------------------------------------+--------------+-----------+------------+-------------+-----------------------------+

[root@dallas1 ~(keystone_boris)]$ python
Python 2.7.5 (default, Feb 19 2014, 13:47:28)
[GCC 4.8.2 20131212 (Red Hat 4.8.2-7)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
 >>> import hmac
 >>> import hashlib
>>> hmac.new('fedora','8543e339-724c-438e-80be-8259906ccf6d',hashlib.sha256).hexdigest()
'c31469feb2b865d76285612331d009bf2b1109674bf4cb745954f1e482c62e7f'
>>>
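
For reference, the same signature can be computed from the shell with openssl (a sketch using the same secret and instance ID as above):

# HMAC-SHA256 over the instance ID, keyed with the shared secret
echo -n '8543e339-724c-438e-80be-8259906ccf6d' | \
  openssl dgst -sha256 -hmac 'fedora' | awk '{print $2}'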

# exit
# . keystonerc_admin

[root@dallas1 ~(keystone_admin)]$ keystone tenant-list
+----------------------------------+----------+---------+
|                id                |   name   | enabled |
+----------------------------------+----------+---------+
| 28d7e48acf74466e84fbb3cbd53c1ccb |  admin   |   True  |
| e896be65e94a4893b870bc29ba86d7eb | ostenant |   True  |
| 2c28cccb99fd4939a5af03548089ab07 | services |   True  |
+----------------------------------+----------+---------+
exit

# sudo su -
[root@dallas1 ~]# . keystonerc_boris


[root@dallas1 ~(keystone_boris)]$ curl  \
   -H  'x-instance-id: 8543e339-724c-438e-80be-8259906ccf6d' \
   -H  'x-tenant-id: e896be65e94a4893b870bc29ba86d7eb' \
   -H  'x-instance-id-signature: c31469feb2b865d76285612331d009bf2b1109674bf4cb745954f1e482c62e7f' \
    http://localhost:8700/latest/meta-data
ami-id
ami-launch-index
ami-manifest-path
block-device-mapping/
hostname
instance-action
instance-id
instance-type
kernel-id
local-hostname
local-ipv4
placement/
public-hostname
public-ipv4
public-keys/
ramdisk-id
reservation-id
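
Appending a specific key to the path returns its value; a sketch reusing the same headers:

curl -H 'x-instance-id: 8543e339-724c-438e-80be-8259906ccf6d' \
     -H 'x-tenant-id: e896be65e94a4893b870bc29ba86d7eb' \
     -H 'x-instance-id-signature: c31469feb2b865d76285612331d009bf2b1109674bf4cb745954f1e482c62e7f' \
     http://localhost:8700/latest/meta-data/instance-id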

Snapshots with different VMs involved :-