Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Yet another "no valid host available"

I successfully launched four m1.medium instances. Since then, every new instance I try to launch goes into the ERROR state. Below, I'll answer the usual battery of questions that get asked.

[root@kvm8 ~(keystone_admin)]# virt-host-validate
  QEMU: Checking for hardware virtualization                                 : PASS
  QEMU: Checking if device /dev/kvm exists                                   : PASS
  QEMU: Checking if device /dev/kvm is accessible                            : PASS
  QEMU: Checking if device /dev/vhost-net exists                             : PASS
  QEMU: Checking if device /dev/net/tun exists                               : PASS
  QEMU: Checking for cgroup 'memory' controller support                      : PASS
  QEMU: Checking for cgroup 'memory' controller mount-point                  : PASS
  QEMU: Checking for cgroup 'cpu' controller support                         : PASS
  QEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS
  QEMU: Checking for cgroup 'cpuacct' controller support                     : PASS
  QEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS
  QEMU: Checking for cgroup 'cpuset' controller support                      : PASS
  QEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS
  QEMU: Checking for cgroup 'devices' controller support                     : PASS
  QEMU: Checking for cgroup 'devices' controller mount-point                 : PASS
  QEMU: Checking for cgroup 'blkio' controller support                       : PASS
  QEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS
  QEMU: Checking for device assignment IOMMU support                         : PASS
  QEMU: Checking if IOMMU is enabled by kernel                               : WARN (IOMMU appears to be disabled in kernel. Add intel_iommu=on to kernel cmdline arguments)
   LXC: Checking for Linux >= 2.6.26                                         : PASS
   LXC: Checking for namespace ipc                                           : PASS
   LXC: Checking for namespace mnt                                           : PASS
   LXC: Checking for namespace pid                                           : PASS
   LXC: Checking for namespace uts                                           : PASS
   LXC: Checking for namespace net                                           : PASS
   LXC: Checking for namespace user                                          : PASS
   LXC: Checking for cgroup 'memory' controller support                      : PASS
   LXC: Checking for cgroup 'memory' controller mount-point                  : PASS
   LXC: Checking for cgroup 'cpu' controller support                         : PASS
   LXC: Checking for cgroup 'cpu' controller mount-point                     : PASS
   LXC: Checking for cgroup 'cpuacct' controller support                     : PASS
   LXC: Checking for cgroup 'cpuacct' controller mount-point                 : PASS
   LXC: Checking for cgroup 'cpuset' controller support                      : PASS
   LXC: Checking for cgroup 'cpuset' controller mount-point                  : PASS
   LXC: Checking for cgroup 'devices' controller support                     : PASS
   LXC: Checking for cgroup 'devices' controller mount-point                 : PASS
   LXC: Checking for cgroup 'blkio' controller support                       : PASS
   LXC: Checking for cgroup 'blkio' controller mount-point                   : PASS


[root@kvm8 ~(keystone_admin)]# nova service-list
/usr/lib/python2.7/site-packages/novaclient/client.py:278: UserWarning: The 'tenant_id' argument is deprecated in Ocata and its use may result in errors in future releases. As 'project_id' is provided, the 'tenant_id' argument will be ignored.
  warnings.warn(msg)
+----+------------------+----------------------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host                 | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+----------------------+----------+---------+-------+----------------------------+-----------------+
| 9  | nova-cert        | kvm8.mia.ccncore.net | internal | enabled | up    | 2017-05-01T18:05:33.000000 | -               |
| 10 | nova-conductor   | kvm8.mia.ccncore.net | internal | enabled | up    | 2017-05-01T18:05:39.000000 | -               |
| 24 | nova-scheduler   | kvm8.mia.ccncore.net | internal | enabled | up    | 2017-05-01T18:05:37.000000 | -               |
| 25 | nova-consoleauth | kvm8.mia.ccncore.net | internal | enabled | up    | 2017-05-01T18:05:35.000000 | -               |
| 26 | nova-compute     | kvm8.mia.ccncore.net | nova     | enabled | up    | 2017-05-01T18:05:33.000000 | -               |
+----+------------------+----------------------+----------+---------+-------+----------------------------+-----------------+
[root@kvm8 ~(keystone_admin)]# nova hypervisor-show 1
/usr/lib/python2.7/site-packages/novaclient/client.py:278: UserWarning: The 'tenant_id' argument is deprecated in Ocata and its use may result in errors in future releases. As 'project_id' is provided, the 'tenant_id' argument will be ignored.
  warnings.warn(msg)
+---------------------------+------------------------------------------+
| Property                  | Value                                    |
+---------------------------+------------------------------------------+
| cpu_info_arch             | x86_64                                   |
| cpu_info_features         | ["pge", "clflush", "sep", "syscall",     |
|                           | "vme", "dtes64", "tsc", "vmx", "xtpr",   |
|                           | "cmov", "ssse3", "est", "pat",           |
|                           | "monitor", "smx", "pcid", "lm", "msr",   |
|                           | "fpu", "fxsr", "tm", "sse4.1", "pae",    |
|                           | "sse4.2", "pclmuldq", "acpi", "mmx",     |
|                           | "arat", "cx8", "mce", "de", "tm2", "ht", |
|                           | "dca", "pni", "pdcm", "mca", "pdpe1gb",  |
|                           | "apic", "sse", "pse", "ds", "invtsc",    |
|                           | "lahf_lm", "rdtscp", "nx", "aes",        |
|                           | "sse2", "ss", "ds_cpl", "pbe", "cx16",   |
|                           | "pse36", "mtrr", "popcnt"]               |
| cpu_info_model            | Westmere                                 |
| cpu_info_topology_cells   | 2                                        |
| cpu_info_topology_cores   | 6                                        |
| cpu_info_topology_sockets | 1                                        |
| cpu_info_topology_threads | 2                                        |
| cpu_info_vendor           | Intel                                    |
| current_workload          | 0                                        |
| disk_available_least      | 28                                       |
| free_disk_gb              | 63                                       |
| free_ram_mb               | 81395                                    |
| host_ip                   | 192.168.140.218                          |
| hypervisor_hostname       | kvm8.mia.ccncore.net                     |
| hypervisor_type           | QEMU                                     |
| hypervisor_version        | 1005003                                  |
| id                        | 1                                        |
| local_gb                  | 223                                      |
| local_gb_used             | 35                                       |
| memory_mb                 | 98291                                    |
| memory_mb_used            | 35584                                    |
| running_vms               | 4                                        |
| service_disabled_reason   | None                                     |
| service_host              | kvm8.mia.ccncore.net                     |
| service_id                | 26                                       |
| state                     | up                                       |
| status                    | enabled                                  |
| vcpus                     | 24                                       |
| vcpus_used                | 8                                        |
+---------------------------+------------------------------------------+

2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager NoValidHost_Remote: No valid host was found. There are not enough hosts available.
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager Traceback (most recent call last):
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager   File "/usr/lib/python2.7/site-packages/oslo_messaging/rpc/server.py", line 218, in inner
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager     return func(*args, **kwargs)
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager   File "/usr/lib/python2.7/site-packages/nova/scheduler/manager.py", line 98, in select_destinations
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager     dests = self.driver.select_destinations(ctxt, spec_obj)
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager   File "/usr/lib/python2.7/site-packages/nova/scheduler/filter_scheduler.py", line 79, in select_destinations
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager     raise exception.NoValidHost(reason=reason)
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager NoValidHost: No valid host was found. There are not enough hosts available.
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:45.942 15019 ERROR nova.conductor.manager
2017-05-01 17:59:46.004 15019 WARNING nova.scheduler.utils [req-2847079f-3777-47a6-ab03-5aeafca867d4 - - - - -] Failed to compute_task_build_instances: No valid host was found. There are not enough hosts available.