Hello,
I am hitting what seems to be the same problem, but the difference in my case is that I use a "gre" tunnel.
Once I enable l2 population, all of the nodes lose the tunnel connection information in the "br-tun" OVS bridge; as soon as I disable population, all of the tunnel info comes back. What am I missing?
I have already added "arp_responder = True", but it still doesn't work:
[agent]
arp_responder = True
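For comparison, when population is disabled and the tunnels are up, br-tun on each node normally carries one GRE port per peer, looking roughly like this (a sketch: local_ip matches compute1's config below, while the peer address 10.0.1.32 is hypothetical):

        Port "gre-0a000120"
            Interface "gre-0a000120"
                type: gre
                options: {df_default="true", in_key=flow, local_ip="10.0.1.31", out_key=flow, remote_ip="10.0.1.32"}

Those are the ports that vanish from br-tun once l2_population is turned on, as the output below shows.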
[root@compute1 ~]# ovs-vsctl show
4e2b9403-3251-481a-86eb-2a9272c9cf09
    Bridge br-tun
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge br-ex
        Port "eth2"
            Interface "eth2"
        Port br-ex
            Interface br-ex
                type: internal
        Port "fg-a67ad606-43"
            Interface "fg-a67ad606-43"
                type: internal
    Bridge br-int
        fail_mode: secure
        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
        Port "qr-85097111-fe"
            tag: 1
            Interface "qr-85097111-fe"
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port br-int
            Interface br-int
                type: internal
        Port "qvofb7c2370-ee"
            tag: 1
            Interface "qvofb7c2370-ee"
    ovs_version: "2.3.1-git3282e51"
[root@network0 ~]# ovs-vsctl show
d21e3790-d015-44e4-ab97-8ee91beda7b2
    Bridge br-ex
        Port "qg-4cf25f12-ed"
            Interface "qg-4cf25f12-ed"
                type: internal
        Port "eth2"
            Interface "eth2"
        Port br-ex
            Interface br-ex
                type: internal
        Port phy-br-ex
            Interface phy-br-ex
                type: patch
                options: {peer=int-br-ex}
    Bridge br-int
        fail_mode: secure
        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-85097111-fe"
            tag: 1
            Interface "qr-85097111-fe"
                type: internal
        Port "tap6dac23c1-16"
            tag: 1
            Interface "tap6dac23c1-16"
                type: internal
        Port br-int
            Interface br-int
                type: internal
        Port "sg-faa9cf1a-54"
            tag: 1
            Interface "sg-faa9cf1a-54"
                type: internal
    Bridge br-tun
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    ovs_version: "2.3.1-git3282e51"
[root@compute1 ~]# cat /etc/neutron/neutron.conf |grep -v ^$|grep -v ^#
[DEFAULT]
rpc_backend = rabbit
rabbit_host = controller0
rabbit_password = openstack
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
dvr_base_mac = fa:16:3f:00:00:00
router_distributed = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://controller0:5000/v2.0
identity_uri = http://controller0:35357
admin_tenant_name = service
admin_user = neutron
admin_password = neutron
[database]
[service_providers]
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
[root@compute1 ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini |grep -v ^$|grep -v ^#
[ml2]
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.0.1.31
enable_tunneling = True
[agent]
tunnel_types = gre
l2_population = True
enable_distributed_routing = True
arp_responder = True
[root@compute1 ~]# cat /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini |grep -v ^$|grep -v ^#
[ovs]
enable_tunneling = True
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 10.0.1.31
[agent]
enable_distributed_routing = True
arp_responder = True
l2_population = True
[securitygroup]
[root@compute1 ~]# cat /etc/neutron/l3_agent.ini |grep -v ^$|grep -v ^#
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
external_network_bridge = br-ex
verbose = True
debug = True
agent_mode = dvr
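As a side note on the DVR part of this setup: agent_mode = dvr is the expected value on compute nodes, while the l3 agent on the network node normally runs in dvr_snat mode. A sketch of the network node's l3_agent.ini counterpart (not shown in the truncated dump below):

[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge = br-ex
agent_mode = dvr_snat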
[root@network0 ~]# cat /etc/neutron/neutron.conf |grep -v ^$|grep -v ^#
[DEFAULT]
rpc_backend = rabbit
rabbit_host = controller0
rabbit_password = openstack
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
dvr_base_mac = fa:16:3f:00:00:00
router_distributed = True
debug = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://controller0:5000/v2 ...
Assuming ML2 + OVS >= 2.1. On each compute node, in the conf file you pass to the OVS agent (plugin.ini / ml2_conf.ini):
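Presumably the snippet the reply points at is the usual l2pop agent block (a sketch, mirroring the options already present in the compute config above):

[agent]
l2_population = True
arp_responder = True

But note the other half of the setup: the ml2_conf.ini passed to neutron-server also has to load the l2population mechanism driver, e.g.:

[ml2]
mechanism_drivers = openvswitch,l2population

The config dump above only lists mechanism_drivers = openvswitch. With l2_population = True on the agents but no l2population driver on the server, the agents stop building tunnel ports from the periodic tunnel sync and instead wait for FDB entries that never arrive, which matches the symptom described: the gre ports disappear from br-tun as soon as population is enabled.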