Unable to create a VM in Horizon, but it works with the CLI (Queens).

asked 2018-07-25 03:19:38 -0500

full_moon

updated 2018-07-25 04:09:46 -0500

OpenStack is configured to use Ceph to store images and instances.

Here is the error from Cinder's scheduler.log when trying to create a VM from Horizon:

[root@controller cinder]# tail scheduler.log
2018-07-18 11:09:20.291 5923 ERROR cinder.scheduler.flows.create_volume [req-571494bb-9c07-484d-982c-1e3e41f2fd90
d70bba6d5cdc44d9a3bd5fa2e192af12 73861796b710486a878e7ffa92124f11 - default default] Failed to run task cinder.scheduler.flows.create_volume.ScheduleCreateVolumeTask;volume:create:
No valid backend was found. No weighed backends available: NoValidBackend: No valid backend was found. No weighed backends available

Error message (pop-up) in the interface:

Error: Failed to perform requested operation on instance "test", the instance has an error status: Please try again later [Error: Build of instance 2f42049a-2f16-4ae8-bb82-5b84ad3f2633 aborted: Volume ebbd5b1e-2424-4215-94b0-e12a32535f18 did not finish being created even after we waited 0 seconds or 1 attempts. And its status is error.].
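
For context, "No valid backend was found. No weighed backends available" generally means the scheduler cannot find an up-and-reporting cinder-volume backend that satisfies the request. A quick sanity check on the controller (a sketch; output depends on the deployment):

openstack volume service list    # the cinder-volume service for the ceph backend should be "up"
cinder get-pools --detail        # shows whether the backend is reporting capacity to the scheduler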

Content of cinder.conf:

[DEFAULT]
transport_url = rabbit://openstack:PASSWORD@controller
my_ip = IP_CONTROLLER
auth_strategy = keystone
glance_api_version = 2
enabled_backends = ceph
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:PASSWORD@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_name = default
project_name = service
username = cinder
password = PASSWORD
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_cluster_name = ceph
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_pool = 2reps
rbd_user = cinder
rbd_secret_uuid = eb0229f3-d7f6-41b5-a144-c57aa168e972
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
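
For reference, the [ceph] backend above can be sanity-checked from the node running cinder-volume, using the pool and user from this config (a sketch; it assumes the client.cinder keyring is installed under /etc/ceph):

rbd --id cinder -p 2reps ls    # a clean (possibly empty) listing means the cinder user can reach the pool
# an authentication or connection error here typically surfaces as "No valid backend" in the scheduler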

Content of nova.conf:

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:PASSWORD@controller
my_ip = IP
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:PASSWORD@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:PASSWORD@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = PASSWORD
[libvirt]
virt_type=qemu
inject_password=false
inject_key=false
inject_partition=-2
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
disk_cachemodes = "network=writeback"
images_type=rbd
images_rbd_pool=2reps
images_rbd_ceph_conf = /etc/ceph/ceph.conf
hw_disk_discard=unmap
rbd_user=cinder
rbd_secret_uuid=eb0229f6-d7f5-41b5-a144-c57aa168e972
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = PASSWORD
service_metadata_proxy = true
metadata_proxy_shared_secret = Sasczfgbrtbgzdscvbrtgn5445edf546544z4fv4r56gj44ezc4v4vhyjs
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PASSWORD
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
novncproxy_base_url=http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
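
For reference, the rbd_secret_uuid under [libvirt] above is expected to match a Ceph secret defined in libvirt on each compute node. A quick check on a compute node (a sketch):

virsh secret-list    # the UUID listed here should match rbd_secret_uuid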

Content of ceph.conf:

[global]
fsid = 3b6e49b9-0f4c-4f8b-b4b8-f5fd12e58c04
mon_initial_members = mon1 mon2 mon3
mon_host = IP1 IP2 IP3
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 250
osd pool default pgp num ...

1 answer


answered 2018-07-25 04:44:36 -0500

updated 2018-07-25 04:44:59 -0500

The CLI works because it doesn't create a volume. Horizon fails because, by default, it does create one, and something is wrong with your Cinder setup. I am not sure, but the absence of default_volume_type in cinder.conf might be the reason.
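
If that is the cause, a minimal sketch of the change (assuming a volume type named "ceph" exists, or is created first) would be:

openstack volume type create ceph    # only needed if the type does not exist yet

and in the [DEFAULT] section of /etc/cinder/cinder.conf:

default_volume_type = ceph

followed by a restart of the Cinder services.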

Horizon's instance launch dialog has a switch on the Source tab ("Create New Volume") that prevents volume creation. If you use that switch, you should be able to launch an instance. Ultimately, though, you will want to fix your Cinder config.
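
For comparison, a sketch of the two paths from the CLI (image, flavor, network and volume names are placeholders):

openstack server create --image cirros --flavor m1.tiny --network private test
# boots directly from the image; no Cinder volume is involved, which is why the CLI works

openstack volume create --image cirros --size 10 bootvol
openstack server create --volume bootvol --flavor m1.tiny --network private test-bfv
# the volume-backed path goes through Cinder and hits the scheduler error above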


Comments

Indeed, it works when adding

default_volume_type = ceph

to the [DEFAULT] section of cinder.conf.

I don't know why it worked without this line on a Pike version of OpenStack.

full_moon ( 2018-07-25 06:24:56 -0500 )

This is speculation, but other versions could implement a default backend even without this setting.

Bernd Bausch ( 2018-07-25 06:39:42 -0500 )
