Ceph Backend Poor Performance

asked 2018-03-05 09:24:55 -0500

sauloaugustosilva

Hi all,

I have been experiencing poor performance from Ceph since I migrated from Mitaka to Ocata. I did some testing with repeated bonnie++ runs while monitoring network utilization (see the benchmark sketch after the config listings below). I have eight nodes running the nova-compute service, configured with the following setup:

[DEFAULT]
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.10.XXX
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
notification_topics = notifications,notifications_designate
notification_driver=ceilometer.compute.nova_notifier
compute_driver = libvirt.LibvirtDriver
scheduler_default_filters = AllHostsFilter
live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
debug = True
verbose = True
rootwrap_config = /etc/nova/rootwrap.conf
vif_plugging_is_fatal = false
vif_plugging_timeout = 0 
allow_resize_to_same_host = True
#live_migration_uri = kvm+ssh://nova@%s/system
[api_database]
connection = mysql+pymysql://nova:PASSWORD@openstack/nova_api
[barbican]
[cache]
[cells]
[cinder]
os_region_name = RegionOne
[conductor]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://nova:PASSWORD@openstack/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://10.10.10.101:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://openstack:5000
auth_url = http://openstack:35357
memcached_servers = openstack:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova  
password = PASSWORD
[libvirt]
virt_type = kvm
images_rbd_pool = vms
images_type = rbd
rbd_secret_uuid = c791e4ea-59fe-4f6e-9571-84b38fa800b2
rbd_user = cinder
disk_cachemodes = "network=writeback"
hw_disk_discard = unmap
live_migration_flag= "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
images_rbd_ceph_conf = /etc/ceph/ceph.conf
inject_partition = -1
[matchmaker_redis]
[metrics]
[neutron]
url = http://openstack:9696
auth_url = http://openstack:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = PASSWORD
nova_medata_ip = openstack2
metadata_proxy_shared_secret = PASSWORD
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = openstack
rabbit_userid = openstack
rabbit_password = "PASSWORD"
[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.10.10.XXX
novncproxy_base_url = https://openstack:6080/vnc_auto.html
[workarounds]
disable_libvirt_livesnapshot = False
[xenserver]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack:35357/v3
username = placement
password = placementsenha
[oslo_messaging_notifications]
driver = messagingv2

and with the following ceph.conf:

[global]
fsid = 35099003-4a39-XXXc-XXXXX-e55f8670ceb2
mon initial members = ceph01,ceph02,ceph03
mon host = 10.10.18.230:6789, 10.10.18.231:6789,10.10.18.210:6789
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
filestore xattr use omap = true
public network = 10.10.18.0/24
cluster network = 10.10.19.0/24
max open files = 131072
osd pool default size = 2
osd crush chooseleaf type = 1
osd pool default min size = 2 
osd pool default pg num = 64
osd pool default pgp num = 64
mon clock drift allowed = 30
debug mon = 10

mon_osd_min_down_reporters = 10
osd_heartbeat_grace = 60
osd_mon_report_interval_min = 15
osd_mon_ack_timeout = 90

[mon]
mon data = /var/lib/ceph/mon/ceph-$id
[osd]
fstype = xfs
osd mkfs type = xfs
osd data = /var/lib/ceph/osd/ceph-$id
osd journal = /var/lib/ceph/osd/ceph-$id/journal
osd mount options xfs = rw,noatime,inode64,logbsize=256k
osd op threads = 8

[mds]
mds cache size = 250000

[client]
rbd cache = true
rbd cache size = 268435456 
rbd cache max dirty = 0
rbd cache target dirty = 134217728
rbd cache max dirty age = 2
rbd cache writethrough until flush = true
rbd concurrent management ops = 20
admin ...
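For comparing numbers, here is a minimal sketch of the testing described above. It assumes the vms pool from the nova.conf, a scratch directory /mnt/test inside a guest, and a 30-second run length (both arbitrary choices for illustration): benchmark the cluster directly with rados bench, repeat bonnie++ inside a guest, and watch the per-interface counters while both run.

# Raw cluster throughput, run from a host with a Ceph client keyring
rados bench -p vms 30 write --no-cleanup   # 30 s of sequential writes, keep the objects
rados bench -p vms 30 seq                  # sequential reads of the objects just written
rados -p vms cleanup                       # remove the benchmark objects afterwards

# The same bonnie++ run the question describes, inside an RBD-backed guest
bonnie++ -d /mnt/test -u root

# On compute and OSD hosts while the benchmarks run (sysstat package)
sar -n DEV 1                               # per-interface throughput, 1-second samples

If rados bench is fast but bonnie++ inside the guest is slow, the problem is in the libvirt/RBD client path; if both are slow, the cluster or its network is the bottleneck.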

1 answer


answered 2018-03-07 06:54:16 -0500

sauloaugustosilva

I solved my problem by adding another interface and physically splitting the cluster and Ceph network traffic onto separate links. Now the VM's source node is delivering the expected higher outgoing traffic.
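For reference, a minimal sketch of what physically splitting the traffic can look like, assuming the public/cluster subnets from the ceph.conf in the question; the interface names (ens3/ens4), the host address (.240), and the /etc/network/interfaces syntax are hypothetical, so adjust them to your hosts and distribution:

# /etc/network/interfaces fragment on an OSD host -- hypothetical example
auto ens3
iface ens3 inet static
    address 10.10.18.240
    netmask 255.255.255.0
    # Ceph public network: client/RBD traffic from the compute nodes

auto ens4
iface ens4 inet static
    address 10.10.19.240
    netmask 255.255.255.0
    # Ceph cluster network: OSD replication and recovery traffic

Because ceph.conf already declares public network = 10.10.18.0/24 and cluster network = 10.10.19.0/24, each OSD picks the matching interface for each role when it restarts. Every OSD line in ceph osd dump lists both its public and cluster address, and sar -n DEV 1 during a benchmark shows whether replication traffic has really moved to the second NIC.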

