Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Cannot attach Ceph volume block to instance

hi guys,

i'm trying to get openstack to work with ceph. i managed to create a block volume using the openstack dashboard, but when i try to attach the volume to a running instance, it gives me an error on the dashboard: "unable to attach volume".

i have tried searching around the logs for clues but i couldn't find anything related to the failed attempt to attach the volume. can someone help me on how and where i should look for clues related to this failure? fyi, i'm able to create, edit and delete block volumes from the dashboard without any issue.

thanks senyapsudah

Cannot attach Ceph volume block to instance

hi guys,

i'm trying to get openstack to work with ceph. i managed to create a block volume using the openstack dashboard, but when i try to attach the volume to a running instance, it gives me an error on the dashboard: "unable to attach volume".

i have tried searching around the logs for clues but i couldn't find anything related to the failed attempt to attach the volume. can someone help me on how and where i should look for clues related to this failure? fyi, i'm able to create, edit and delete block volumes from the dashboard without any issue.

openstack: icehouse, ceph: firefly

please find my nova.conf below:

# nova.conf — Nova (compute) configuration, OpenStack Icehouse with Ceph RBD.
[DEFAULT]
#libvirt_type=qemu
debug=true
# NOTE: "verbose" appeared twice in the original (true and True); kept one
# occurrence — duplicate keys are parser-dependent (first-wins vs last-wins).
verbose=true
auth_strategy = keystone
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
#volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
scheduler_default_filters=AllHostsFilter

rpc_backend = rabbit
rabbit_host = localhost
rabbit_password = guest

allow_resize_to_same_host = True
allow_migrate_to_same_host = True
allow_instance_snapshots = True

# Metadata service endpoints.
metadata_host=192.168.8.41
metadata_listen=0.0.0.0
metadata_listen_port=8775
metadata_manager=nova.api.manager.MetadataManager
metadata_port=8775

my_ip = localhost
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = localhost
novncproxy_base_url = http://192.168.8.41:6080/vnc_auto.html

glance_host = localhost

# Neutron networking.
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://localhost:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = abc
neutron_admin_auth_url = http://localhost:35357/v2.0
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
service_neutron_metadata_proxy = true
neutron_metadata_proxy_shared_secret=abc123


#libvirt_images_type=rbd
#libvirt_images_rbd_pool=vms
#libvirt_images_rbd_ceph_conf=/etc/ceph/ceph.conf

# NOTE(review): "nova.volume.driver.RBDDriver" is the old nova-volume driver
# path; on Icehouse volumes are managed by Cinder (see
# cinder.volume.drivers.rbd.RBDDriver in cinder.conf) — verify this key is
# still honored, it may be silently ignored. rbd_user/rbd_secret_uuid must
# match a libvirt secret defined on this compute host (virsh secret-define)
# for attach to work — TODO confirm the secret exists on the hypervisor.
volume_driver=nova.volume.driver.RBDDriver
rbd_pool=volumes
#images_type = rbd
rbd_user=cinder
rbd_secret_uuid=4cceac22-1a44-497a-8bc0-d49bbf7144df


# NOTE(review): the backup_* keys below are cinder-backup options; nova does
# not read them — they belong in cinder.conf, not here.
backup_driver=cinder.backup.drivers.ceph
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder-backup
backup_ceph_chunk_size=134217728
backup_ceph_pool=backups
backup_ceph_stripe_unit=0
backup_ceph_stripe_count=0
restore_discard_excess_bytes=true

# Recommended for Ceph-backed instances: do not inject files into images.
libvirt_inject_password=false
libvirt_inject_key=false
libvirt_inject_partition=-2

[database]
connection = mysql://nova:abc@localhost/nova

[keystone_authtoken]
auth_uri = http://localhost:5000
auth_host = localhost
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = abc

this is my cinder.conf

# cinder.conf — Cinder (block storage) configuration with Ceph RBD backend.
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
# Fixed typo: original read "api_paste_confg", which cinder silently ignores;
# the correct option name is api_paste_config.
api_paste_config = /etc/cinder/api-paste.ini
#iscsi_helper = tgtadm
#volume_name_template = volume-%s
#volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes

# RabbitMQ messaging.
rpc_backend = rabbit
rabbit_host = localhost
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = guest

# Ceph RBD backend; rbd_secret_uuid must match the libvirt secret on the
# compute nodes so nova can attach these volumes.
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_pool=volumes
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
glance_api_version=2
rbd_user=cinder
rbd_secret_uuid=4cceac22-1a44-497a-8bc0-d49bbf7144df

[database]
connection = mysql://cinder:abc@localhost/cinder

[keystone_authtoken]
auth_uri = http://localhost:5000
auth_host = localhost
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = abc

i'm currently testing internally before i move it to production.

thanks senyapsudah