Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Unable to launch instances

I have installed openstack ocata using the official documentation. In this newer release of openstack, they have introduced a concept of placement-api which is giving me all the trouble.

Whenever I try to launch an instance, I get a "No valid host was found" error.

However, it is interesting to note the command outputs below.

[root@controller conf.d]# nova hypervisor-list
/usr/lib/python2.7/site-packages/novaclient/client.py:278: UserWarning: The 'tenant_id' argument is deprecated in Ocata and its use may result in errors in future releases. As 'project_id' is provided, the 'tenant_id' argument will be ignored.
  warnings.warn(msg)
+----+-----------------------------+-------+---------+
| ID | Hypervisor hostname         | State | Status  |
+----+-----------------------------+-------+---------+
| 1  | compute3.os.nfv.comviva.com | up    | enabled |
| 2  | compute2.os.nfv.comviva.com | up    | enabled |
+----+-----------------------------+-------+---------+


[root@controller conf.d]# nova service-list
/usr/lib/python2.7/site-packages/novaclient/client.py:278: UserWarning: The 'tenant_id' argument is deprecated in Ocata and its use may result in errors in future releases. As 'project_id' is provided, the 'tenant_id' argument will be ignored.
  warnings.warn(msg)
+----+------------------+-------------------------------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host                          | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+-------------------------------+----------+---------+-------+----------------------------+-----------------+
| 9  | nova-consoleauth | controller.os.nfv.comviva.com | internal | enabled | up    | 2017-04-24T07:25:28.000000 | -               |
| 10 | nova-scheduler   | controller.os.nfv.comviva.com | internal | enabled | up    | 2017-04-24T07:25:28.000000 | -               |
| 11 | nova-conductor   | controller.os.nfv.comviva.com | internal | enabled | up    | 2017-04-24T07:25:29.000000 | -               |
| 14 | nova-compute     | compute3.os.nfv.comviva.com   | nova     | enabled | up    | 2017-04-24T07:25:31.000000 | -               |
| 15 | nova-compute     | compute2.os.nfv.comviva.com   | nova     | enabled | up    | 2017-04-24T07:25:28.000000 | -               |
+----+------------------+-------------------------------+----------+---------+-------+----------------------------+-----------------+

Also, no placement-api service appears to be running at all.

Below is my nova.conf file for the controller and the compute node.

nova.conf file controller node

[DEFAULT]
transport_url = rabbit://openstack:rabbit123@controller
debug = True
instances_path=$state_path/instances
my_ip = 172.19.10.31
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
state_path=/var/lib/nova
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
[api]
[api_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:nova123@controller/nova
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova123
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://controller:9696
region_name = RegionOne
service_metadata_proxy = True
metadata_proxy_shared_secret = metasecret
auth_type = password
auth_url = http://controller:35357
project_name = service
project_domain_name = default
username = neutron
user_domain_name = default
password = neutron123
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = rabbit123
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
auth_type = password
auth_url = http://controller:35357/v3
project_name = service
project_domain_name = default
username = placement
user_domain_name = default
password = nova123
[placement_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]

nova.conf contents on the compute node.

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:rabbit123@controller
instance_usage_audit_period = hour
compute_driver=libvirt.LibvirtDriver
instances_path=$state_path/instances
instance_usage_audit = True
my_ip = 172.19.10.253
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
state_path=/var/lib/nova
rpc_backend = rabbit
auth_strategy = keystone
notify_on_state_change = vm_and_task_state
[api]
auth_strategy=keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova123
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://controller:9696
region_name = RegionOne
auth_type = password
auth_url = http://controller:35357
project_name = service
project_domain_name = default
username = neutron
user_domain_name = default
password = neutron123
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver = messagingv2
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = rabbit123
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
auth_type = password
auth_url = http://controller:35357/v3
project_name = service
project_domain_name = default
username = placement
user_domain_name = default
password = nova123
[placement_database]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]

Also, this is the error message that I get in the nova-scheduler logs.

2017-04-24 12:44:19.097 31150 DEBUG oslo_messaging._drivers.amqpdriver [-] received message msg_id: 1a6655ca8545471cab81a522970299c8 reply to reply_1b6e6cd1498243
c785c7a105f634632b __call__ /usr/lib/python2.7/site-packages/oslo_messaging/_drivers/amqpdriver.py:194
2017-04-24 12:44:19.307 31150 ERROR nova.scheduler.client.report [req-35cd747e-1478-445a-abb7-e6c196519aca - - - - -] Failed to retrieve filtered list of resource
 providers from placement API for filters {'resources': 'DISK_GB:20,MEMORY_MB:2048,VCPU:1'}. Got 404: <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL /placement/resource_providers was not found on this server.</p>
</body></html>
.
2017-04-24 12:44:19.308 31150 DEBUG nova.filters [req-35cd747e-1478-445a-abb7-e6c196519aca - - - - -] Starting with 0 host(s) get_filtered_objects /usr/lib/python
2.7/site-packages/nova/filters.py:70

Has anyone faced a similar issue with openstack-ocata?