Launching Instance on VMware shows Guest OS Not Found

We have a 3-node OpenStack Havana setup as follows:

Controller: 172.16.17.17
Compute: 172.16.17.2
Network node: 172.16.17.18

I configured vCenter (5.1) as the compute backend for the Ubuntu Havana controller, manually following the official OpenStack documentation (VOVA not used). I am trying to provision a CentOS 6.3 64-bit image. The provisioning succeeds, but the deployed VM has no guest OS installed, even though the instance state is active in OpenStack and the VM is running in the backend.
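
For what it's worth, this is roughly how I checked the instance and the backing image from the controller (the instance and image names below are placeholders for mine):

# instance reports ACTIVE even though the guest has no OS
nova show centos-test | grep -E 'status|vm_state|hypervisor_hostname'
# image metadata that the VMware driver consumes
glance image-show centos-6.3-x86_64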

The VM console shows the message "No Operating System Found".

PS: The image was created per the OpenStack image-creation guidelines (persistent settings removed, SSH keys removed, firewall disabled, and VMware Tools installed before shutting it down).
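
In case it matters, the VMDK was uploaded to Glance along these lines (the disk-type and adapter-type property values are what I believe the disk uses, and the file name is a placeholder):

glance image-create --name centos-6.3-x86_64 \
  --disk-format vmdk --container-format bare --is-public True \
  --property vmware_disktype="sparse" \
  --property vmware_adaptertype="lsiLogic" < centos63-x86_64.vmdk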

Promiscuous mode is set to Accept on the vSwitch that is assigned to the VM.
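
For completeness, this is how promiscuous mode was checked and set on the ESXi host (the vSwitch name is a placeholder for ours):

esxcli network vswitch standard policy security get --vswitch-name=vSwitch0
esxcli network vswitch standard policy security set --vswitch-name=vSwitch0 --allow-promiscuous=true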

Any help overcoming this issue is appreciated.

Following are the config files for your reference.

/etc/nova/nova.conf (on Controller)

[DEFAULT]
#dhcpbridge_flagfile=/etc/nova/nova.conf
#dhcpbridge=/usr/bin/nova-dhcpbridge
#logdir=/var/log/nova
#state_path=/var/lib/nova
#lock_path=/var/lock/nova
#force_dhcp_release=True
#iscsi_helper=tgtadm
#libvirt_use_virtio_for_bridges=True
#connection_type=libvirt
#root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
#verbose=True
#ec2_private_dns_show_ip=True
#api_paste_config=/etc/nova/api-paste.ini
#volumes_path=/var/lib/nova/volumes
#enabled_apis=ec2,osapi_compute,metadata

logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
verbose=True
api_paste_config=/etc/nova/api-paste.ini
compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
rabbit_host=172.16.17.17
rabbit_password=root123
nova_url=http://172.16.17.17:8774/v1.1/
sql_connection=mysql://novaUser:novaPass@172.16.17.17/nova
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
rpc_backend = nova.rpc.impl_kombu

# Auth
use_deprecated_auth=false
auth_strategy=keystone

# Imaging service
glance_api_servers=172.16.17.17:9292
image_service=nova.image.glance.GlanceImageService

# Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://172.16.17.110:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=172.16.17.17
vncserver_listen=0.0.0.0

# Network settings
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://172.16.17.17:9696
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=neutron
neutron_admin_password=service_pass
neutron_admin_auth_url=http://172.16.17.17:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

#If you want Neutron + Nova Security groups

firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=neutron

#If you want Nova Security groups only, comment the two lines above and uncomment line -1-.

#-1-firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver



#Metadata

service_neutron_metadata_proxy = True

neutron_metadata_proxy_shared_secret = helloOpenStack



# Compute #
#For VMware
compute_driver=vmwareapi.VMwareVCDriver

# For KVM:

#compute_driver=libvirt.LibvirtDriver

# Cinder #

volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900
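
For reference, this is how I sanity-checked that the services came up with this configuration (run on the controller; I am just showing what I looked for, not claiming it proves anything):

# all binaries should report state ":-)"
nova-manage service list
# the VMware hypervisor should be listed
nova hypervisor-list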

/etc/cinder/cinder.conf (On Controller)

[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
iscsi_helper = ietadm
sql_connection = mysql://cinderUser:cinderPass@172.16.17.17/cinder
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
iscsi_ip_address=172.16.17.17
rpc_backend = cinder.openstack.common.rpc.impl_kombu
rabbit_host = 172.16.17.17
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = root123
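
I have not exercised Cinder much here, but this is roughly how I confirmed cinder-volume starts cleanly with the VMDK driver (assuming cinder-manage is available on the controller):

cinder-manage service list
tail -n 20 /var/log/cinder/cinder-volume.log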

/etc/neutron/neutron.conf (On Controller)

[DEFAULT]
# Default log level is INFO
# verbose and debug have the same result;
# either one sets the log level to DEBUG.
# debug = False
# verbose = False

# Where to store Neutron state files.  This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron

# Where to store lock files
lock_path = $state_path/lock

# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S

# use_syslog                           -> syslog
# log_file and log_dir                 -> log_dir/log_file
# (not log_file) and log_dir           -> log_dir/{binary_name}.log
# use_stderr                           -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors                       -> notification system

# use_syslog = False
# syslog_log_facility = LOG_USER

# use_stderr = True
# log_file =
# log_dir =

# publish_errors = False

# Address to bind the API server
# bind_host = 0.0.0.0

# Port to bind the API server to
# bind_port = 9696

# Path to the extensions.  Note that this can be a colon-separated list of
# paths.  For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =

# Neutron plugin provider module
core_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2

# Advanced service modules
# service_plugins =

# Paste configuration file
# api_paste_config = api-paste.ini

# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone

# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00

# Maximum number of retries to generate a unique MAC address
# mac_generation_retries = 16

# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 86400

# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True

# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False


# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron

# If passed, use a fake RabbitMQ provider
# fake_rabbit = False

# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
rabbit_host = 172.16.17.17
# Password of the RabbitMQ server
rabbit_password = root123
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum number of retries when trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false

# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True

# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *

# ============ Notification System Options =====================

# Notifications can be sent when a network/subnet/port is created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)

# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver. DHCP agents need it.
notification_driver = neutron.openstack.common.notifier.rpc_notifier

# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO

# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host

# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications

# Default maximum number of items returned in a single response.
# A value of 'infinite' or a negative value means no limit; otherwise
# the value must be greater than 0. If the number of items requested
# is greater than pagination_max_limit, the server will return only
# pagination_max_limit items.
# pagination_max_limit = -1

# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5

# Maximum number of host routes per subnet
# max_subnet_host_routes = 20

# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5

# =========== items for agent management extension =============
# Seconds to regard the agent as down.
# agent_down_time = 5
# ===========  end of items for agent management extension =====

# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler

# Allow auto scheduling of networks to DHCP agents. Non-hosted networks
# will be scheduled to the first DHCP agent that sends a
# get_active_networks message to the neutron server.
# network_auto_schedule = True

# Allow auto scheduling of routers to L3 agents. Non-hosted routers will be
# scheduled to the first L3 agent that sends a sync_routers message to the neutron server.
# router_auto_schedule = True

# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1

# ===========  end of items for agent scheduler extension =====

# =========== WSGI parameters related to the API server ==============
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600

# Number of seconds to keep retrying to listen
# retry_until_window = 30

# Number of backlog requests to configure the socket with.
# backlog = 4096

# Enable SSL on the API server
# use_ssl = False

# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile

# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile

# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========

[quotas]
# resource name(s) that are supported in quota features
# quota_items = network,subnet,port

# default number of resources allowed per tenant; a negative value means unlimited
# default_quota = -1

# number of networks allowed per tenant; a negative value means unlimited
# quota_network = 10

# number of subnets allowed per tenant; a negative value means unlimited
# quota_subnet = 10

# number of ports allowed per tenant; a negative value means unlimited
# quota_port = 50

# number of security groups allowed per tenant; a negative value means unlimited
# quota_security_group = 10

# number of security group rules allowed per tenant; a negative value means unlimited
# quota_security_group_rule = 100

# default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver

[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf

# =========== items for agent management extension =============
# seconds between nodes reporting state to server, should be less than
# agent_down_time
# report_interval = 4

# ===========  end of items for agent management extension =====

[keystone_authtoken]
#auth_host = 127.0.0.1
auth_host = 172.16.17.17
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = service_pass
signing_dir = /var/lib/neutron/keystone-signing
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
#signing_dir = $state_path/keystone-signing

[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
#connection = sqlite:////var/lib/neutron/neutron.sqlite

# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =

# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10

# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10

# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1

# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10

# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600

# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20

# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0

# Add python stack traces to SQL as comment strings
# connection_trace = False

# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10

[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in the form:
# service_provider=<service_type>:<name>:<driver>[:default]
# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN.
# The combination of <service_type> and <name> must be unique; <driver> must also be unique.
# This is a multi-line option. Example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# Example of a non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
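
For reference, the Neutron agents register fine against this configuration; I verified with:

neutron agent-list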

/etc/nova/nova.conf (On Compute Node)

[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_type=vmwareapi
libvirt_use_virtio_for_bridges=True
connection_type=vmwareapi
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
rpc_backend = nova.rpc.impl_kombu
rabbit_host=172.16.17.17
rabbit_password=root123
#compute_driver=vmwareapi.VMwareVCDriver
compute_driver=vmwareapi.VMwareESXDriver
auth_strategy=keystone
my_ip=172.16.17.2
vnc_enabled=True
vncserver_listen=172.16.17.2
vncserver_proxyclient_address=172.16.17.2
novncproxy_base_url=http://172.16.17.2:6080/vnc_auto.html
glance_host=172.16.17.17
glance_api_servers=172.16.17.17:9292
image_service=nova.image.glance.GlanceImageService
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver


#network_manager=nova.network.manager.FlatDHCPManager
#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
#network_size=254
#allow_same_net_traffic=False
multi_host=True
#send_arp_for_ha=True
#share_dhcp_address=True
#force_dhcp_release=True
#flat_network_bridge=br-int
#vlan_interface=vmnic1
#flat_interface=eth1
#public_interface=eth1

# Network settings
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://172.16.17.17:9696
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=neutron
neutron_admin_password=service_pass
neutron_admin_auth_url=http://172.16.17.17:35357/v2.0
neutron_ovs_bridge=br100
#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
#linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
vmwareapi_vif_driver=nova.virt.vmwareapi.vif.VMwarelanBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver


#If you want Neutron + Nova Security groups
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=neutron

service_neutron_metadata_proxy = True
neutron_metadata_proxy_shared_secret = helloOpenStack
metadata_host = 172.16.17.17
metadata_listen = 127.0.0.1
metadata_listen_port = 8775


volume_api_class=nova.volume.cinder.API

osapi_volume_listen_port=5900

cinder_catalog_info=volume:cinder:internalURL

[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://novaUser:novaPass@172.16.17.17/nova

[keystone_authtoken]
auth_host = 172.16.17.17
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = service_pass

[vmware]
#host_ip=148.147.163.210
#host_username=openstackuser
#host_password=Pa55word
#cluster_name=OpenstackCluster
host_ip=172.16.14.250
host_username=root
host_password=esxroot
wsdl_location=http://127.0.0.1:8080/vmware/SDK/vsphere-ws/wsdl/vim25/vimService.wsdl
datastore_regex=datastore1
vmwareapi_vif_driver=nova.virt.vmwareapi.vif.VMwarelanBridgeDriver
vmware_api_retry_count=10
vmwareapi_task_poll_interval=5.0
integration_bridge=br100
vlan_interface=vmnic1
clone_type=full
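
One note on the [vmware] section above: wsdl_location points at a locally hosted copy of the vSphere SDK, and I confirmed it is reachable from the compute node before starting nova-compute (just a plain HTTP check):

curl -I http://127.0.0.1:8080/vmware/SDK/vsphere-ws/wsdl/vim25/vimService.wsdl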