
VM instance can't reach external network. NATed packets do not return.

asked 2014-09-05 08:52:10 -0500 by edgl, updated 2014-09-21 05:42:44 -0500

Hello,

I have a setup of 3 servers:
- Controller
- Compute
- Storage

# neutron net-list
extnet01 10.61.4.192/26
private01 192.168.101.0/24

VM IP: 192.168.101.17
DHCP IP: 192.168.101.13
Neutron router internal IP: 192.168.101.1
Neutron router external IP: 10.61.4.194
Physical gateway: 10.61.4.193

# ip netns

qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0
qdhcp-eb7fbd59-27df-4d36-ae5e-3cc2e48c3511
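
(To narrow down where the reply packets get lost, a useful first step is to test from inside the router namespace itself. This is a generic sketch using the IDs above, not output from the original thread.)

# PING THE PHYSICAL GATEWAY FROM THE ROUTER NAMESPACE
# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 ping -c 3 10.61.4.193

# CHECK THE NAMESPACE DEFAULT ROUTE (it should point at 10.61.4.193)
# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 ip route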


# QROUTER namespace
# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 ip a

74: qr-edc0f100-bd: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
    link/ether fa:16:3e:0a:cd:d0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.101.1/24 brd 192.168.101.255 scope global qr-edc0f100-bd
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe0a:cdd0/64 scope link
       valid_lft forever preferred_lft forever
75: qg-b04f2489-1f: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
    link/ether fa:16:3e:a7:29:d4 brd ff:ff:ff:ff:ff:ff
    inet 10.61.4.194/26 brd 10.61.4.255 scope global qg-b04f2489-1f
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fea7:29d4/64 scope link
       valid_lft forever preferred_lft forever
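
(The SNAT rule for the tenant subnet lives in this same namespace. As a hedged reference, the Icehouse l3-agent normally installs a rule along these lines; exact chain names can vary by release.)

# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 iptables -t nat -S | grep SNAT
-A neutron-l3-agent-snat -s 192.168.101.0/24 -j SNAT --to-source 10.61.4.194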


# CONTROLLER OVS-VSCTL
# ovs-vsctl show
Both the qr- and qg- ports are in the br-int bridge, tagged with different VLAN tags.

74cea2c7-67dc-421b-9e63-3a415b1b04a1
    Bridge br-int
        fail_mode: secure
        Port "qr-edc0f100-bd"
            tag: 1
            Interface "qr-edc0f100-bd"
                type: internal
        Port "qg-b04f2489-1f"
            tag: 2
            Interface "qg-b04f2489-1f"
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port int-br-ex
            Interface int-br-ex
        Port br-int
            Interface br-int
                type: internal
        Port "tap1a0b65dc-f4"
            tag: 1
            Interface "tap1a0b65dc-f4"
                type: internal
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port phy-br-ex
            Interface phy-br-ex
        Port "enp21s0f0.43"
            Interface "enp21s0f0.43"
    Bridge br-tun
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port "vxlan-c0a86402"
            Interface "vxlan-c0a86402"
                type: vxlan
                options: {in_key=flow, local_ip="192.168.100.1", out_key=flow, remote_ip="192.168.100.2"}
        Port br-tun
            Interface br-tun
                type: internal
    ovs_version: "2.1.3"
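
(To confirm whether the NATed packets actually leave on the external VLAN and whether replies ever come back, a capture on the underlying external interface shown above is a quick check; this is a sketch, not output from the thread.)

# tcpdump -n -i enp21s0f0.43 host 10.61.4.194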


# BR-TUN FLOWS (Controller)

 1(patch-int): addr:56:be:61:85:00:a7
 2(vxlan-0a000003): addr:b6:03:f5:96:36:e6
 3(vxlan-c0a86402): addr:c2:c2:2a:4d:78:e7
 4(vxlan-c0a86404): addr:4a:93:cc:5a:5a:14
 LOCAL(br-tun): addr:fa:73:27:33:4c:48

NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=3446.279s, table=0, n_packets=0, n_bytes=0, idle_age=3446, priority=0 actions=drop
 cookie=0x0, duration=3445.508s, table=0, n_packets=552, n_bytes=41156, idle_age=1008, priority=1,in_port=3 actions=resubmit(,3)
 cookie=0x0, duration=3446.327s, table=0, n_packets=625, n_bytes=71926, idle_age=172, priority=1,in_port=1 actions=resubmit(,1)
 cookie=0x0, duration=3445.360s, table=0, n_packets=388, n_bytes=41553, idle_age=172, priority=1,in_port=4 actions=resubmit(,3)
 cookie=0x0, duration=3445.658s, table=0, n_packets=0, n_bytes=0, idle_age=3445, priority=1,in_port=2 actions=resubmit(,3)
 cookie=0x0, duration=3446.181s, table=1, n_packets=45, n_bytes=3880, idle_age=901, priority=1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,21)
 cookie=0x0, duration=3446.230s, table=1, n_packets ...
(output truncated)

Comments

What do these show? For your particular tenant (say boris):

[root@icehouse1 ~(keystone_boris)]# nova secgroup-list-rules default
[root@icehouse1 ~(keystone_admin)]# ip netns exec qrouter-router-id iptables -S -t nat | grep 169.254

dbaxps ( 2014-09-05 09:40:49 -0500 )

And what do these show?

[root@icehouse1 ~(keystone_admin)]# ip netns exec qrouter-router-id netstat -antp
[root@icehouse1 ~(keystone_admin)]# neutron router-list
[root@icehouse1 ~(keystone_admin)]# ip netns exec qrouter-router-id iptables -S -t nat

dbaxps ( 2014-09-05 09:43:26 -0500 )

How do I fix issues with metadata_agent and metadata_proxy? I have the same problem as you: I can't curl 169.254.169.254.

leethaoqn ( 2014-09-07 21:17:45 -0500 )

Read all the comments for this question and you will get your problem fixed.

edgl ( 2014-09-08 02:07:50 -0500 )

Please share your network node config with me.

leethaoqn ( 2014-09-08 02:20:40 -0500 )

2 answers


answered 2014-09-06 11:39:20 -0500 by edgl

At the moment I only have "admin" and "demo" tenants.

[root@blade23 ~(keystone_admin)]# nova secgroup-list-rules default
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| udp         | 1         | 65535   | 0.0.0.0/0 |              |
| icmp        | -1        | -1      | 0.0.0.0/0 |              |
| tcp         | 1         | 65535   | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+


[root@blade23 ~(keystone_admin)]# neutron router-list
+--------------------------------------+-------------+-----------------------------------------------------------------------------+
| id                                   | name        | external_gateway_info                                                       |
+--------------------------------------+-------------+-----------------------------------------------------------------------------+
| dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 | extrouter01 | {"network_id": "b69ff53a-1041-4624-ae0b-c859fadec128", "enable_snat": true} |
+--------------------------------------+-------------+-----------------------------------------------------------------------------+


[root@blade23 ~(keystone_admin)]# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 iptables -S -t nat | grep 169.254
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697


root      5361  0.0  0.0 263200 24648 ?        S    Sep04   0:00 /usr/bin/python /bin/neutron-ns-metadata-proxy --pid_file=/var/lib/neutron/external/pids/dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0.pid --metadata_proxy_socket=/var/lib/neutron/metadata_proxy --router_id=dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 --state_path=/var/lib/neutron --metadata_port=9697 --debug --verbose --log-file=neutron-ns-metadata-proxy-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0.log --log-dir=/var/log/neutron

neutron  22286  0.0  0.0 280000 34188 ?        Ss   Sep04   0:15 /usr/bin/python /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --log-file /var/log/neutron/metadata-agent.log


I see that the metadata proxy is started with the option "--metadata_port=9697". I checked the logs, and they say the metadata proxy is starting:

[root@blade23 ~(keystone_admin)]# tail -f /var/log/neutron/neutron-ns-metadata-proxy-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0.log
2014-09-06 12:34:46.900 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.max_overflow          = 20 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.901 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.max_pool_size         = 10 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.901 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.max_retries           = 10 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.901 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.min_pool_size         = 1 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.901 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.pool_timeout          = 10 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.902 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.retry_interval        = 10 log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.902 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] database.slave_connection      =  log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1953
2014-09-06 12:34:46.902 10467 DEBUG neutron.agent.metadata.namespace_proxy [-] ******************************************************************************** log_opt_values /usr/lib/python2.7/site-packages/oslo/config/cfg.py:1955
2014-09-06 12:34:46.909 10473 INFO neutron.wsgi [-] (10473) wsgi starting up on http://0.0.0.0:9697/

But the service is not listening on the host, and there are no error messages in the logs. Does it just crash silently?

# netstat -anlp | grep 5361
(no output)
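
(Note: neutron-ns-metadata-proxy opens its listening socket inside the qrouter network namespace, so a host-level netstat on its PID shows nothing even when the proxy is healthy. The namespace-scoped check, which the comments below confirm does show port 9697, is:)

# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 netstat -lntp | grep 9697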


The metadata-agent, however, is listening:

[root@blade23 ~(keystone_admin)]# netstat -anlp | grep 22286
tcp        0      0 10.61.4.130:60244       10.61.4.130:5672        ESTABLISHED 22286/python
tcp        0      0 10.61.4.130:60245       10.61.4.130:5672        ESTABLISHED 22286/python
unix  2      [ ACC ]     STREAM     LISTENING     122836   22286/python ...
(output truncated)

Comments

$ netstat -antp | grep 8775
$ ip netns exec qrouter-router-id netstat -antp

dbaxps ( 2014-09-06 11:46:44 -0500 )

[root@blade23 ~(keystone_admin)]# netstat -antp | grep 8775
tcp        0      0 0.0.0.0:8775        0.0.0.0:*           LISTEN      21198/python

nova-api is listening on this port.

edgl ( 2014-09-06 11:49:28 -0500 )

# ip netns exec qrouter-dbd8ca7a-4ab1-40b5-accb-9e1d4ac7d2d0 netstat -antp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:9697

edgl ( 2014-09-06 11:52:00 -0500 )

$ ps -ef | grep 21198

dbaxps ( 2014-09-06 11:52:15 -0500 )

# ps -ef | grep 21198
nova 21198 1 0 Sep04 ? 00:26:07 /usr/bin/python /usr/bin/nova-api
nova 21206 21198 0 Sep04 ? 00:00:00 /usr/bin/python /usr/bin/nova-api
<...> a lot of nova-api threads

edgl ( 2014-09-06 11:55:13 -0500 )

answered 2014-09-06 12:19:32 -0500 by dbaxps, updated 2014-09-21 10:23:32 -0500

Forced to use this field due to the lack of formatting options in comments.

[root@icehouse1 neutron(keystone_admin)]# cat  metadata_agent.ini | grep -v ^$ | grep -v ^#
    [DEFAULT]
    debug = False
    auth_url = http://192.168.1.127:35357/v2.0
    auth_region = RegionOne
    auth_insecure = False
    admin_tenant_name = services
    admin_user = neutron
    admin_password = 5f11f559abc94440
    nova_metadata_ip = 192.168.1.127
    nova_metadata_port = 8775
    metadata_proxy_shared_secret =227f7bbc8b6f4f74
    metadata_workers = 0

Tune:-

[root@icehouse1 nova(keystone_admin)]# cat nova.conf | grep service_neutron_metadata | grep -v ^#
service_neutron_metadata_proxy=True
[root@icehouse1 nova(keystone_admin)]# cat nova.conf | grep metadata_proxy_shared_secret | grep -v ^#
neutron_metadata_proxy_shared_secret=227f7bbc8b6f4f74
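
For these settings to take effect, restart the affected services. A hedged example for an RDO/packstack-style install; service names may differ per distribution:

# service openstack-nova-api restart
# service neutron-metadata-agent restart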

Run :-

From within the VM:
$ curl http://169.254.169.254/latest/meta-data/
$ curl http://169.254.169.254/latest/meta-data/instance-id
$ curl http://169.254.169.254/latest/meta-data/local-ipv4

According to the most recent feedback, the problem was solved after tuning Ubuntu's nova.conf and metadata_agent.ini files to match the versions generated by packstack.

You should always have this in your iptables:

$ ip netns exec qrouter-c5087a90-8b27-4c12-9e79-3672520ad049 iptables -S -t nat | grep 169.254
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697

Your VM should always have access to 169.254.169.254; otherwise, cloud-init will fail to run.

Fix /etc/sysctl.conf:

net.ipv4.ip_forward=1

That's all.
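
To apply the sysctl change without a reboot and verify it took effect (standard sysctl usage):

# sysctl -p /etc/sysctl.conf
# sysctl net.ipv4.ip_forward
net.ipv4.ip_forward = 1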

Port qg-xxxx should be attached to br-ex, not br-int. A working sample is below.

[root@icehouse1 ~(keystone_admin)]# ovs-vsctl show
50a2dcb7-9502-4c08-b175-563eec368db9
    Bridge br-int
        fail_mode: secure
        Port br-int
            Interface br-int
                type: internal
        Port "tap707ec6ff-71"
            tag: 1
            Interface "tap707ec6ff-71"
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-6825e25c-60"
            tag: 2
            Interface "qr-6825e25c-60"
                type: internal
        Port "qr-4ebac49b-e9"
            tag: 3
            Interface "qr-4ebac49b-e9"
                type: internal
        Port "tap6b3d1487-a3"
            tag: 2
            Interface "tap6b3d1487-a3"
                type: internal
        Port "tapc094ce87-74"
            tag: 3
            Interface "tapc094ce87-74"
                type: internal
    Bridge br-tun
        Port br-tun
            Interface br-tun
                type: internal
        Port "gre-c0a80189"
            Interface "gre-c0a80189"
                type: gre
                options: {in_key=flow, local_ip="192.168.1.127", out_key=flow, remote_ip="192.168.1.137"}
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge br-ex
        Port "qg-4dda9e91-32"
            Interface "qg-4dda9e91-32"
                type: internal
        Port br-ex
            Interface br-ex
                type: internal
        Port "qg-acc3f483-f4"
            Interface "qg-acc3f483-f4"
                type: internal
        Port "p37p1"
            Interface "p37p1"
    ovs_version: "2.3.0"
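
If a qg- port ended up on br-int because external_network_bridge did not point at br-ex when the gateway was set, one way (a sketch using the router and network names from the question, not a step from the original answer) to make the l3-agent re-plug it after fixing the config is to clear and re-set the router gateway:

# neutron router-gateway-clear extrouter01
# neutron router-gateway-set extrouter01 extnet01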

Per your request, /etc/neutron/l3_agent.ini:

[root@icehouse1 ~(keystone_admin)]# cat  /etc/neutron/l3_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
debug = False
interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
handle_internal_only_routers = True
external_network_bridge = br-ex
metadata_port = 9697
send_arp_for_ha = 3
periodic_interval = 40
periodic_fuzzy_delay = 5
enable_metadata_proxy = True
router_delete_namespaces = False
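
After editing l3_agent.ini, restart the agent and confirm it is alive; a quick sanity check, assuming an RDO-style service name:

# service neutron-l3-agent restart
# neutron agent-list | grep 'L3 agent'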

Comments

I have exactly the same problem; did you make any progress on it? I noticed that some others have the 169... entry in the routing table of the demo router, but I don't. How can I manage this, and could it be the problem?

n7rxDE ( 2014-09-07 12:48:51 -0500 )

Here is my question, which relates to this:

https://ask.openstack.org/en/question/46704/does-anyone-have-working-icehouse-3-node-setup-with-neutron-networking-ml2-ovs-gre/

n7rxDE ( 2014-09-07 12:53:01 -0500 )

Yeah, here is my conf:

net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.all.rp_filter = 0
net.ipv4.ip_forward = 1

leethaoqn ( 2014-09-08 03:04:58 -0500 )

I noticed that my specific log for the namespace proxy is empty?!

n7rxDE ( 2014-09-08 03:05:38 -0500 )

This is already set up for me. It is unbelievable that I can reach other hosts on the external network but cannot telnet out to the web... and for me the NATed packet also gets back to the router.

n7rxDE ( 2014-09-08 03:15:43 -0500 )
