Revision history

revision 1 (initial version)

CentOS 7, trove version 1:6.0.0-1.el7

[DEFAULT]
log_dir = /var/log/trove
notifier_queue_hostname = controller01
rpc_backend = rabbit
# Show debugging output in logs (sets DEBUG log level output)
debug = True

# Update the service and instance statuses if the instance fails to become
# active within the configured usage_timeout.
usage_timeout = 3600
# restore_usage_timeout = 36000
update_status_on_fail = True

#================= RPC Configuration ================================

# URL representing the messaging driver to use and its full configuration.
# If not set, we fall back to the 'rpc_backend' option and driver specific
# configuration.
#transport_url=<none>

# The messaging driver to use. Options include rabbit, qpid and zmq.
# Default is rabbit. (string value)
#rpc_backend=rabbit

# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the 'transport_url' option.
control_exchange = trove

# DB API implementation
db_api_implementation = trove.db.sqlalchemy.api

# Configuration options for talking to nova via the novaclient.
trove_auth_url = http://controller01:5000/v3.0
nova_compute_url = http://controller01:8774/v2
cinder_url = http://controller01:8776/v1
swift_url = http://controller01:8080/v1/AUTH_
neutron_url = http://controller01:9696/

# nova_compute_url, cinder_url, swift_url, and heat_url can all be fetched
# from Keystone. To fetch from Keystone, comment out nova_compute_url,
# cinder_url, swift_url, and heat_url and optionally uncomment the lines below.

# Region name of this node. Used when searching catalog. Default value is None.
#os_region_name = RegionOne
# Service type to use when searching catalog.
#nova_compute_service_type = compute
# Service type to use when searching catalog.
#cinder_service_type = volumev2
# Service type to use when searching catalog.
#swift_service_type = object-store
# Service type to use when searching catalog.
#heat_service_type = orchestration
# Service type to use when searching catalog.
#neutron_service_type = network

# Config options for enabling volume service
trove_volume_support = True
block_device_mapping = vdb
device_path = /dev/vdb
mount_point = /var/lib/mysql
volume_time_out=30
server_delete_time_out=480

# Nova server boot options
# sets the --config-drive argument when doing a nova boot
# (controls how file injection is handled by nova)
use_nova_server_config_drive = True

# Configuration options for talking to nova via the novaclient.
# These options are for an admin user in your keystone config.
# It proxies the token received from the user to send to nova via this admin
# user's creds, basically acting like the client via that proxy token.
nova_proxy_admin_user = admin
nova_proxy_admin_pass = openstack
nova_proxy_admin_tenant_id = 1c7b790d9fd148ef97008ba3c153c1ed
#nova_proxy_admin_tenant_name = service
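These proxy-admin credentials are what the taskmanager uses against nova, cinder and neutron on the user's behalf, so it is worth confirming they actually authenticate before chasing anything else. A minimal sketch, assuming the admin user and project live in the Default domain (adjust to your deployment):

openstack --os-auth-url http://controller01:5000/v3 \
          --os-username admin --os-password openstack \
          --os-project-name admin \
          --os-user-domain-name Default --os-project-domain-name Default \
          token issue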

# Manager impl for the taskmanager
taskmanager_manager=trove.taskmanager.manager.Manager

# Manager sends Exists Notifications
exists_notification_transformer = trove.extensions.mgmt.instances.models.NovaNotificationTransformer
exists_notification_ticks = 30
notification_service_id = mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b

# Trove DNS
trove_dns_support = False
dns_account_id = 123456
dns_auth_url = http://127.0.0.1:5000/v2.0
dns_username = user
dns_passkey = password
dns_ttl = 3600
dns_domain_name = 'trove.com.'
dns_domain_id = 11111111-1111-1111-1111-111111111111
dns_driver = trove.dns.designate.driver.DesignateDriver
dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
dns_endpoint_url = http://127.0.0.1/v1/
dns_service_type = dns

# Neutron
network_driver = trove.network.neutron.NeutronDriver
default_neutron_networks =

# Trove Security Groups for Instances
trove_security_groups_support = True
trove_security_group_rule_cidr = 0.0.0.0/0

# Guest related conf
agent_heartbeat_time = 10
agent_call_low_timeout = 5
agent_call_high_timeout = 150
agent_replication_snapshot_timeout = 36000

# Whether to use nova's contrib api for create server with volume
use_nova_server_volume = False

# Config option for filtering the IP address that DNS uses
# For nova-network, set this to the appropriate network label defined in nova
# For neutron, set this to .* since users can specify custom network labels
# You can also optionally specify regexes to match the actual IP addresses
# ip_regex (white-list) is applied before black_list_regex in the filter chain
network_label_regex = .*
#ip_regex = ^(15.|123.)
#black_list_regex = ^(10.0.0.)

# Datastore templates
template_path = /etc/trove/templates/

# ============ Notification System configuration ===========================

# Sets the notification driver used by oslo.messaging. Options include
# messaging, messagingv2, log and routing. Default is 'noop'.
# notification_driver=noop

# Topics used for OpenStack notifications, list value. Default is 'notifications'.
# notification_topics=notifications

# ============ Logging information =============================
#log_dir = /integration/report
log_file = trove-taskmanager.log

# ============ PyDev remote debugging =============================

# Enable or disable pydev remote debugging.
# There are three allowed values: 'disabled', 'enabled' and 'auto'.
# If the value is 'auto', it tries to connect to the remote debugger server,
# but in case of error continues running with debugging disabled.
pydev_debug = disabled

# remote debug server host and port options
#pydev_debug_host = localhost
#pydev_debug_port = 5678

# path to pydevd library. It will be used if pydevd is absent in sys.path
#pydev_path = <path>

# ================= Guestagent related ========================
#guest_config = /etc/trove/trove-guestagent.conf
# Use 'guest_info = /etc/guest_info' for pre-Kilo compatibility
#guest_info = guest_info.conf
# Use 'injected_config_location = /etc/trove' for pre-Kilo compatibility
#injected_config_location = /etc/trove/conf.d
#cloudinit_location = /etc/trove/cloudinit

[database]
connection = mysql+pymysql://trove:openstack@controller01/trove
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
#connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
# connection = mysql+pymysql://root:root@localhost/trove

# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default wait_timeout of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
idle_timeout = 3600
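If the taskmanager cannot reach this database, create requests never progress, so a quick sanity check is that the connection string above works and the schema has been synced. A sketch, assuming the trove:openstack credentials shown in the connection option:

mysql -h controller01 -u trove -popenstack -e 'SHOW TABLES;' trove
trove-manage db_sync     # (re)applies the trove schema; should be safe to re-run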

# ================= Security groups related ========================
# Each future datastore implementation should implement
# its own oslo group with the following defined in it:
# - tcp_ports; udp_ports;

[profiler]
# If False, fully disable the profiling feature.
#enabled = False
# If False, don't trace SQL requests.
#trace_sqlalchemy = True

[oslo_messaging_rabbit]
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
rabbit_host=controller01

# The RabbitMQ broker port where a single node is used. (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_port
#rabbit_port=5672

# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
#rabbit_hosts=$rabbit_host:$rabbit_port

# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
#rabbit_use_ssl=false

# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
rabbit_userid=openstack

# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
rabbit_password=openstack

# The RabbitMQ virtual host. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
#rabbit_virtual_host=/
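An instance that never leaves BUILD is often the guest agent (or the taskmanager itself) failing to authenticate against RabbitMQ with these credentials. A minimal check on the broker host, assuming the openstack/openstack user configured above:

rabbitmqctl list_users                              # 'openstack' should be listed
rabbitmqctl authenticate_user openstack openstack   # verifies the password
rabbitmqctl list_permissions -p /                   # needs configure/write/read on the vhost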

[mysql]
# Whether to permit ICMP. Default is False.
icmp = True
# Format (single port or port range): A, B-C
# where C is greater than B
tcp_ports = 3306
volume_support = True
device_path = /dev/vdb

[redis]
# Format (single port or port range): A, B-C
# where C is greater than B
tcp_ports = 6379
# redis uses local storage
volume_support = False
# default device_path = None

[cassandra]
tcp_ports = 7000, 7001, 9042, 9160
volume_support = True
device_path = /dev/vdb

[couchbase]
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
volume_support = True
device_path = /dev/vdb

[mongodb]
volume_support = True
device_path = /dev/vdb

[vertica]
tcp_ports = 5433, 5434, 22, 5444, 5450, 4803
udp_ports = 5433, 4803, 4804, 6453
volume_support = True
device_path = /dev/vdb
mount_point = /var/lib/vertica
taskmanager_strategy = trove.common.strategies.cluster.experimental.vertica.taskmanager.VerticaTaskManagerStrategy

CentOS 7, trove version 1:6.0.0-1.el7. After the reinstall I no longer get the "Trove Error creating security group for instance" error; now I see the instance is stuck in BUILD.
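A sketch of the checks that usually narrow down where a BUILD hang comes from (assuming admin credentials are loaded; log locations follow the config above):

openstack database instance list        # Trove's view: status and task of the instance
openstack server list --all-projects    # Nova's view of the underlying guest VM
tail -n 100 /var/log/trove/trove-taskmanager.log
# and, inside the guest VM itself, the agent log -- the guest agent has to reach
# RabbitMQ on controller01 before Trove will ever mark the datastore ACTIVE:
tail -n 100 /var/log/trove/trove-guestagent.log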
