def _check_update_to_existing_type(existing_type: str, machine_type: str) -> None:
    """Check the update to an existing machine type

    The aim here is to block operators from moving between the underlying
    machine types, between versioned and aliased types or to an older
    version of the same type during an update.

    :param existing_type: The existing machine type
    :param machine_type: The new machine type
    :raises: nova.exception.InvalidMachineTypeUpdate
    """
    # Check that we are not switching between types or between an alias and
    # versioned type such as q35 to pc-q35-5.2.0 etc.
    for pattern in SUPPORTED_TYPE_PATTERNS:
        if re.match(pattern, existing_type) and not re.match(
                pattern, machine_type):
            raise exception.InvalidMachineTypeUpdate(
                existing_machine_type=existing_type,
                machine_type=machine_type)

    # Now check that the new version isn't older than the original.
    # This needs to support x.y and x.y.z as used by RHEL shipped QEMU.
    # NOTE: the pattern is $-anchored so at most one match exists; a single
    # re.search per type replaces the previous any(re.findall(...)) plus two
    # further findall calls, which evaluated the same regex three times.
    version_pattern = r'\d+\.\d+$|\d+\.\d+\.\d+$'
    existing_match = re.search(version_pattern, existing_type)
    if existing_match:
        existing_version = existing_match.group()
        # The pattern loop above has already ensured both types are of the
        # same versioned flavor, so machine_type also carries a version.
        new_version = re.search(version_pattern, machine_type).group()
        if (versionutils.convert_version_to_int(new_version) <
                versionutils.convert_version_to_int(existing_version)):
            raise exception.InvalidMachineTypeUpdate(
                existing_machine_type=existing_type,
                machine_type=machine_type)
def check_for_setup_error(self):
    """Validate the krest dependency and the K2 REST API version."""
    # Fail fast when the optional 'krest' dependency is missing.
    if self.krest is None:
        msg = _("Unable to import 'krest' python module.")
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)

    conf = self.configuration
    self.client = self.krest.EndPoint(conf.san_ip, conf.san_login,
                                      conf.san_password, ssl_validate=False)

    v_rs = self.client.search("system/state")
    # The result must expose hits and contain at least one entry.
    if not (hasattr(v_rs, 'hits') and v_rs.total != 0):
        msg = _("K2 rest api version search failed.")
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)

    ver = v_rs.hits[0].rest_api_version
    ver_exist = versionutils.convert_version_to_int(ver)
    ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION)
    if ver_exist < ver_min:
        msg = _("K2 rest api version should be "
                ">= %s.") % K2_MIN_VERSION
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
                   op=operator.lt):
    """Check libvirt version, hypervisor version, and hypervisor type

    :param hv_type: hypervisor driver from the top of this file.
    """
    conn = self.get_connection()
    try:
        # Each supplied criterion that fails short-circuits to False.
        if lv_ver is not None and op(
                conn.getLibVersion(),
                versionutils.convert_version_to_int(lv_ver)):
            return False
        if hv_ver is not None and op(
                conn.getVersion(),
                versionutils.convert_version_to_int(hv_ver)):
            return False
        if hv_type is not None and conn.getType() != hv_type:
            return False
        return True
    except Exception:
        # Any connection/parse failure is treated as "check failed".
        return False
def check_for_setup_error(self):
    """Verify krest is importable and the K2 array speaks a new-enough API."""
    if krest is None:
        msg = _("Unable to import 'krest' python module.")
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)

    conf = self.configuration
    self.client = KrestWrap(conf.san_ip, conf.san_login,
                            conf.san_password, ssl_validate=False)
    # When replication is configured, open a second endpoint to the target.
    if self.replica:
        self.target = KrestWrap(self.replica.backend_id,
                                self.replica.login,
                                self.replica.password,
                                ssl_validate=False)

    v_rs = self.client.search("system/state")
    if not (hasattr(v_rs, 'hits') and v_rs.total != 0):
        msg = _("K2 REST API version search failed.")
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)

    ver = v_rs.hits[0].rest_api_version
    if (versionutils.convert_version_to_int(ver) <
            versionutils.convert_version_to_int(K2_MIN_VERSION)):
        msg = _("K2 REST API version should be "
                ">= %s.") % K2_MIN_VERSION
        LOG.error(msg)
        raise exception.KaminarioCinderDriverException(reason=msg)
def test_convert_version_to_int(self):
    """Strings and tuples of any length convert; junk raises ValueError."""
    # Dotted string form.
    self.assertEqual(6002000,
                     versionutils.convert_version_to_int('6.2.0'))
    # Tuple forms of varying arity.
    self.assertEqual(6004003,
                     versionutils.convert_version_to_int((6, 4, 3)))
    self.assertEqual(5, versionutils.convert_version_to_int((5, )))
    # Non-numeric components must be rejected.
    self.assertRaises(ValueError,
                      versionutils.convert_version_to_int,
                      '5a.6b')
def test_convert_version_to_int(self):
    """Exercise string, tuple and invalid inputs of convert_version_to_int."""
    cases = [
        ('6.2.0', 6002000),
        ((6, 4, 3), 6004003),
        ((5, ), 5),
    ]
    for version, expected in cases:
        self.assertEqual(expected,
                         versionutils.convert_version_to_int(version))
    # Alphanumeric components are invalid.
    self.assertRaises(ValueError,
                      versionutils.convert_version_to_int,
                      '5a.6b')
def _check_ovs_supported_version(session):
    # The 'ovs' port type is only supported from vCenter 5.5 onwards;
    # warn (rather than fail) when the connected VC is older.
    required = versionutils.convert_version_to_int(
        constants.MIN_VC_OVS_VERSION)
    current = versionutils.convert_version_to_int(
        vim_util.get_vc_version(session))
    if current < required:
        LOG.warning('VMware vCenter version less than %(version)s '
                    'does not support the \'ovs\' port type.',
                    {'version': constants.MIN_VC_OVS_VERSION})
def _version_check(self, req_ver=None, op=operator.lt):
    """Compare the cached xCAT version against a requirement.

    Returns False when ``op(current, required)`` holds or when any
    conversion fails; True otherwise (including when req_ver is None).
    """
    try:
        if req_ver is None:
            return True
        current = versionutils.convert_version_to_int(self._xcat_version)
        required = versionutils.convert_version_to_int(req_ver)
        return not op(current, required)
    except Exception:
        return False
def _check_min_version(self):
    """Log the vCenter version and warn when it is below the minimum."""
    vc_version = vim_util.get_vc_version(self._session)
    LOG.info(_LI("VMware vCenter version: %s"), vc_version)
    minimum = v_utils.convert_version_to_int(constants.MIN_VC_VERSION)
    if v_utils.convert_version_to_int(vc_version) < minimum:
        # TODO(garyk): enforce this from M
        LOG.warning(_LW('Running Nova with a VMware vCenter version less '
                        'than %(version)s is deprecated. The required '
                        'minimum version of vCenter will be raised to '
                        '%(version)s in the 13.0.0 release.'),
                    {'version': constants.MIN_VC_VERSION})
def update_status(self):
    """Update the current state of the cluster."""
    capacity, freespace = _get_ds_capacity_and_freespace(
        self._session, self._cluster, self._datastore_regex)

    # CPU and memory statistics come from the cluster itself.
    cluster_stats = vm_util.get_stats_from_cluster(self._session,
                                                   self._cluster)
    about_info = self._session._call_method(vim_util, "get_about_info")

    disk_total = capacity / units.Gi
    disk_available = freespace / units.Gi
    data = {
        "vcpus": cluster_stats['vcpus'],
        "disk_total": disk_total,
        "disk_available": disk_available,
        "disk_used": disk_total - disk_available,
        "host_memory_total": cluster_stats['mem']['total'],
        "host_memory_free": cluster_stats['mem']['free'],
        "hypervisor_type": about_info.name,
        "hypervisor_version": versionutils.convert_version_to_int(
            str(about_info.version)),
        "hypervisor_hostname": self._host_name,
        "supported_instances": [
            (arch.I686, hv_type.VMWARE, vm_mode.HVM),
            (arch.X86_64, hv_type.VMWARE, vm_mode.HVM),
        ],
    }
    self._stats = data
    return data
def test_stat_consumption_from_compute_node_non_pci(self):
    """HostState.update() with a ComputeNode carrying no PCI pools.

    Verifies pci_stats ends up with no pools and the (already integer)
    hypervisor version is carried through unchanged.
    """
    # Raw stats as string values, as stored on the ComputeNode record.
    stats = {
        'num_instances': '5', 'num_proj_12345': '3', 'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
        'num_task_%s' % task_states.MIGRATING: '2',
        'num_os_type_linux': '4', 'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
    # pci_device_pools=None is the case under test.
    compute = objects.ComputeNode(
        stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None, host_ip='127.0.0.1',
        hypervisor_type='htype', hypervisor_hostname='hostname',
        cpu_info='cpu_info', supported_hv_specs=[],
        hypervisor_version=hyper_ver_int, numa_topology=None,
        pci_device_pools=None, metrics=None, cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5, disk_allocation_ratio=1.0)
    host = host_manager.HostState("fakehost", "fakenode")
    host.update(compute=compute)
    # No PCI pools were provided, so none should be consumed.
    self.assertEqual([], host.pci_stats.pools)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def get_available_resource(self, nodename):
    """Return a resource-usage dict for this docker host.

    :param nodename: node name supplied by the compute manager; cached on
        first call, after which a changed value is only logged.
    :returns: dict of cpu/memory/disk totals and hypervisor identity
    """
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error('Hostname has changed from %(old)s to %(new)s. '
                  'A restart is required to take effect.',
                  {'old': self._nodename, 'new': nodename})

    memory = hostinfo.get_memory_usage()
    disk = hostinfo.get_disk_usage()
    # NOTE(review): '/' is true division on Python 3, so the *_mb/*_gb
    # conversions below may yield floats — confirm consumers accept that.
    stats = {
        'vcpus': hostinfo.get_total_vcpus(),
        'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
        'memory_mb': memory['total'] / units.Mi,
        'memory_mb_used': memory['used'] / units.Mi,
        'local_gb': disk['total'] / units.Gi,
        'local_gb_used': disk['used'] / units.Gi,
        'disk_available_least': disk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'numa_topology': None,
        'supported_instances': [
            (fields.Architecture.I686, fields.HVType.DOCKER,
             fields.VMMode.EXE),
            (fields.Architecture.X86_64, fields.HVType.DOCKER,
             fields.VMMode.EXE)
        ]
    }
    return stats
def update_status(self):
    """Update the current state of the cluster."""
    ds_capacity, ds_free = _get_ds_capacity_and_freespace(
        self._session, self._cluster, self._datastore_regex)
    cluster_stats = vm_util.get_stats_from_cluster(self._session,
                                                   self._cluster)
    about = self._session._call_method(vim_util, "get_about_info")

    data = {}
    data["vcpus"] = cluster_stats['vcpus']
    data["disk_total"] = ds_capacity / units.Gi
    data["disk_available"] = ds_free / units.Gi
    # Derive used space from the two figures above.
    data["disk_used"] = data["disk_total"] - data["disk_available"]
    data["host_memory_total"] = cluster_stats['mem']['total']
    data["host_memory_free"] = cluster_stats['mem']['free']
    data["hypervisor_type"] = about.name
    data["hypervisor_version"] = versionutils.convert_version_to_int(
        str(about.version))
    data["hypervisor_hostname"] = self._host_name
    data["supported_instances"] = [
        (arch.I686, hv_type.VMWARE, vm_mode.HVM),
        (arch.X86_64, hv_type.VMWARE, vm_mode.HVM)]

    self._stats = data
    return data
def __init__(self, virtapi, read_only=False):
    """Set up fake instances, bookkeeping dicts and the base host status."""
    super(FakeDriver, self).__init__(virtapi)
    self.instances = {}
    self._mounts = {}
    self._interfaces = {}
    self.active_migrations = {}
    self.resources = Resources(vcpus=self.vcpus,
                               memory_mb=self.memory_mb,
                               local_gb=self.local_gb)
    self.host_status_base = {
        'hypervisor_type': 'fake',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'hypervisor_hostname': CONF.host,
        'cpu_info': {},
        'disk_available_least': 0,
        'supported_instances': [
            (obj_fields.Architecture.X86_64,
             obj_fields.HVType.FAKE,
             obj_fields.VMMode.HVM),
        ],
        'numa_topology': None,
    }
    # Lazily populate the fake node list on first instantiation.
    if not _FAKE_NODES:
        set_nodes([CONF.host])
    self._nodes = copy.copy(_FAKE_NODES)
def __init__(self, virtapi, read_only=False):
    """Initialize the fake driver's state and capacity tracking."""
    super(FakeDriver, self).__init__(virtapi)
    self.instances = {}
    self.resources = Resources(vcpus=self.vcpus,
                               memory_mb=self.memory_mb,
                               local_gb=self.local_gb)
    hyper_ver = versionutils.convert_version_to_int('1.0')
    self.host_status_base = {
        'hypervisor_type': 'fake',
        'hypervisor_version': hyper_ver,
        'hypervisor_hostname': CONF.host,
        'cpu_info': {},
        'disk_available_least': 0,
        'supported_instances': [(obj_fields.Architecture.X86_64,
                                 obj_fields.HVType.FAKE,
                                 obj_fields.VMMode.HVM)],
        'numa_topology': None,
    }
    self._mounts = {}
    self._interfaces = {}
    self.active_migrations = {}
    # Ensure the shared fake-node registry exists before copying it.
    if not _FAKE_NODES:
        set_nodes([CONF.host])
    self._nodes = copy.copy(_FAKE_NODES)
def _get_minimum_version(cls, attribute, context, binary):
    """Return the lowest value of ``attribute`` across services of ``binary``.

    :raises: exception.ServiceTooOld if any service still reports a None
        version (i.e. is running Liberty-era code).
    """
    services = ServiceList.get_all_by_binary(context, binary)
    min_ver = None
    min_ver_str = None
    for svc in services:
        ver_str = getattr(svc, attribute)
        if ver_str is None:
            # NOTE(dulek) None in *_current_version means that this
            # service is in Liberty version, which we now don't provide
            # backward compatibility to.
            msg = (_(
                "Service %s is in Liberty version. We do not provide "
                "backward compatibility with Liberty now, so you "
                "need to upgrade it, release by release if live "
                "upgrade is required. After upgrade you may need to "
                "remove any stale service records via "
                '"cinder-manage service remove".') % svc.binary)
            raise exception.ServiceTooOld(msg)
        ver = versionutils.convert_version_to_int(ver_str)
        if min_ver is None or ver < min_ver:
            min_ver, min_ver_str = ver, ver_str
    return min_ver_str
def test_stat_consumption_from_compute_node_non_pci(self):
    """HostState.update() with a PCI-pool-less ComputeNode.

    Checks that pci_stats has no pools and hypervisor_version survives
    the update untouched.
    """
    # Stats stored as strings, mirroring the DB representation.
    stats = {
        'num_instances': '5', 'num_proj_12345': '3', 'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
        'num_task_%s' % task_states.MIGRATING: '2',
        'num_os_type_linux': '4', 'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
    # pci_device_pools=None is the condition being exercised.
    compute = objects.ComputeNode(
        stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None, host_ip='127.0.0.1',
        hypervisor_type='htype', hypervisor_hostname='hostname',
        cpu_info='cpu_info', supported_hv_specs=[],
        hypervisor_version=hyper_ver_int, numa_topology=None,
        pci_device_pools=None, metrics=None, cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5)
    host = host_manager.HostState("fakehost", "fakenode")
    host.update(compute=compute)
    self.assertEqual([], host.pci_stats.pools)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def get_pinned_version(cls):
    """Return this object's pinned version as an integer.

    The pin follows the last service to be updated, which is c-vol or
    c-bak, via the RPC API's object version cap.
    """
    obj_cap = cls.get_rpc_api().determine_obj_version_cap()
    pinned = base.OBJ_VERSIONS[obj_cap][cls.__name__]
    return versionutils.convert_version_to_int(pinned)
def test_image_properties_filter_passes_without_inst_props(self):
    # A request spec with no image at all must pass any capable host.
    spec_obj = compute.RequestSpec(image=None)
    caps = {
        'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_passes_without_inst_props(self):
    # An empty request_spec (no image properties) passes the filter.
    filter_properties = {'request_spec': {}}
    caps = {'supported_instances': [(arch.X86_64, hv_type.KVM,
                                     vm_mode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def _check_min_version(self):
    """Raise if vCenter is below the hard minimum; warn below the next one."""
    vc_version = vim_util.get_vc_version(self._session)
    LOG.info("VMware vCenter version: %s", vc_version)
    current = v_utils.convert_version_to_int(vc_version)

    if current < v_utils.convert_version_to_int(constants.MIN_VC_VERSION):
        raise exception.NovaException(
            _('Detected vCenter version %(version)s. Nova requires VMware '
              'vCenter version %(min_version)s or greater.') % {
                'version': vc_version,
                'min_version': constants.MIN_VC_VERSION})

    # Below the upcoming minimum: still allowed, but deprecated.
    if current < v_utils.convert_version_to_int(
            constants.NEXT_MIN_VC_VERSION):
        LOG.warning('Running Nova with a VMware vCenter version less '
                    'than %(version)s is deprecated. The required '
                    'minimum version of vCenter will be raised to '
                    '%(version)s in the 16.0.0 release.',
                    {'version': constants.NEXT_MIN_VC_VERSION})
def test_image_properties_filter_xen_hv_type_compat(self):
    # A legacy 'xapi' hypervisor_type must be treated as xen.
    img_props = {'properties': {'hypervisor_type': 'xapi'}}
    filter_properties = {'request_spec': {'image': img_props}}
    caps = {'supported_instances': [(arch.I686, hv_type.XEN,
                                     vm_mode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_partial_inst_props(self):
    # Image wants HVM on x86_64; host only offers XEN mode -> reject.
    img_props = {'properties': {'architecture': arch.X86_64,
                                'vm_mode': vm_mode.HVM}}
    filter_properties = {'request_spec': {'image': img_props}}
    caps = {
        'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_without_host_props(self):
    # A host missing 'supported_instances' cannot satisfy image props.
    img_props = {'properties': {'architecture': arch.X86_64,
                                'hypervisor_type': hv_type.KVM,
                                'vm_mode': vm_mode.HVM}}
    filter_properties = {'request_spec': {'image': img_props}}
    caps = {
        'enabled': True,
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def get_available_resource(self, nodename):
    """Retrieve resource information.

    This method is called when nova-compute launches, and
    as part of a periodic task that records the results in the DB.

    :param nodename: ignored in this driver
    :returns: dictionary describing resources
    """
    host_stats = self.host_state.get_host_stats(refresh=True)

    # Updating host information.
    # NOTE(review): '/' is true division on Python 3; whether these
    # MB/GB figures are ints or floats depends on the raw XenServer
    # values — confirm downstream consumers accept floats.
    total_ram_mb = host_stats['host_memory_total'] / units.Mi
    # NOTE(belliott) memory-free-computed is a value provided by XenServer
    # for gauging free memory more conservatively than memory-free.
    free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
    total_disk_gb = host_stats['disk_total'] / units.Gi
    used_disk_gb = host_stats['disk_used'] / units.Gi
    allocated_disk_gb = host_stats['disk_allocated'] / units.Gi
    hyper_ver = versionutils.convert_version_to_int(
        self._session.product_version)
    dic = {
        'vcpus': host_stats['host_cpu_info']['cpu_count'],
        'memory_mb': total_ram_mb,
        'local_gb': total_disk_gb,
        'vcpus_used': host_stats['vcpus_used'],
        'memory_mb_used': total_ram_mb - free_ram_mb,
        'local_gb_used': used_disk_gb,
        'hypervisor_type': 'XenServer',
        'hypervisor_version': hyper_ver,
        'hypervisor_hostname': host_stats['host_hostname'],
        'cpu_info': jsonutils.dumps(host_stats['cpu_model']),
        # Headroom after subtracting space already allocated to disks.
        'disk_available_least': total_disk_gb - allocated_disk_gb,
        'supported_instances': host_stats['supported_instances'],
        'pci_passthrough_devices': jsonutils.dumps(
            host_stats['pci_passthrough_devices']),
        'numa_topology': None}

    return dic
def fake_get_compute(context, host):
    """Return a canned compute-node record for ``host``."""
    service = {
        'host': host,
        'binary': 'nova-compute',
        'topic': 'compute',
        'report_count': 1,
        'updated_at': 'foo',
        'hypervisor_type': 'bar',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'disabled': False,
    }
    return {'compute_node': [service]}
def test_image_properties_filter_fails_partial_inst_props(self):
    # Architecture matches, but the requested HVM mode is not offered.
    img_props = objects.ImageMeta(
        properties=objects.ImageMetaProps(hw_architecture=arch.X86_64,
                                          hw_vm_mode=vm_mode.HVM))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {'supported_instances': [(arch.X86_64, hv_type.XEN,
                                     vm_mode.XEN)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_baremetal_vmmode_compat(self):
    # Legacy 'baremetal' vm_mode values must be treated as hvm.
    img_props = {'properties': {'vm_mode': 'baremetal'}}
    filter_properties = {'request_spec': {'image': img_props}}
    caps = {
        'supported_instances': [(arch.I686, hv_type.BAREMETAL,
                                 vm_mode.HVM)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_without_inst_props(self):
    # No image on the spec at all -> any capable host passes.
    spec_obj = objects.RequestSpec(image=None)
    caps = {'supported_instances': [(obj_fields.Architecture.X86_64,
                                     obj_fields.HVType.KVM,
                                     obj_fields.VMMode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def __init__(self, scheduler_driver=None, service_name=None, *args,
             **kwargs):
    """Load the scheduler driver and the RPC client APIs."""
    # Fall back to the configured driver when none is given.
    scheduler_driver = scheduler_driver or CONF.scheduler_driver
    self.driver = importutils.import_object(scheduler_driver)
    super(SchedulerManager, self).__init__(*args, **kwargs)
    self._startup_delay = True
    self.volume_api = volume_rpcapi.VolumeAPI()
    self.sch_api = scheduler_rpcapi.SchedulerAPI()
    self.rpc_api_version = versionutils.convert_version_to_int(
        self.RPC_API_VERSION)
def test_image_properties_filter_xen_hv_type_compat(self):
    # Old images carrying 'xapi' as hv_type are equivalent to xen.
    img_props = {'properties': {'hypervisor_type': 'xapi'}}
    filter_properties = {'request_spec': {'image': img_props}}
    hv_version = versionutils.convert_version_to_int('6.0.0')
    caps = {
        'supported_instances': [(arch.I686, hv_type.XEN, vm_mode.HVM)],
        'hypervisor_version': hv_version,
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_stat_consumption_from_compute_node(self, sync_mock):
    """HostState.update() must consume all ComputeNode fields.

    Also verifies the instance-info sync callback fires exactly once
    for this (host, node) pair.
    """
    # Stats are stored as strings; HostState is expected to parse them.
    stats = {
        "num_instances": "5", "num_proj_12345": "3", "num_proj_23456": "1",
        "num_vm_%s" % vm_states.BUILDING: "2",
        "num_vm_%s" % vm_states.SUSPENDED: "1",
        "num_task_%s" % task_states.RESIZE_MIGRATING: "1",
        "num_task_%s" % task_states.MIGRATING: "2",
        "num_os_type_linux": "4", "num_os_type_windoze": "1",
        "io_workload": "42",
    }
    hyper_ver_int = versionutils.convert_version_to_int("6.0.0")
    compute = objects.ComputeNode(
        stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None, host_ip="127.0.0.1",
        hypervisor_type="htype", hypervisor_hostname="hostname",
        cpu_info="cpu_info", supported_hv_specs=[],
        hypervisor_version=hyper_ver_int, numa_topology=None,
        pci_device_pools=None, metrics=None, cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5,
    )
    host = host_manager.HostState("fakehost", "fakenode")
    host.update(compute=compute)
    # The sync hook must be invoked once for this host/node pair.
    sync_mock.assert_called_once_with(("fakehost", "fakenode"))
    self.assertEqual(5, host.num_instances)
    self.assertEqual(42, host.num_io_ops)
    self.assertEqual(10, len(host.stats))
    self.assertEqual("127.0.0.1", str(host.host_ip))
    self.assertEqual("htype", host.hypervisor_type)
    self.assertEqual("hostname", host.hypervisor_hostname)
    self.assertEqual("cpu_info", host.cpu_info)
    self.assertEqual([], host.supported_instances)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def __init__(self, scheduler_driver=None, service_name=None, *args,
             **kwargs):
    """Load the scheduler driver plus the RPC and message APIs."""
    if not scheduler_driver:
        scheduler_driver = CONF.scheduler_driver
    self.driver = importutils.import_object(scheduler_driver)
    super(SchedulerManager, self).__init__(*args, **kwargs)
    self._startup_delay = True
    # Client-side API handles used by the scheduling flows.
    self.volume_api = volume_rpcapi.VolumeAPI()
    self.sch_api = scheduler_rpcapi.SchedulerAPI()
    self.message_api = mess_api.API()
    self.rpc_api_version = versionutils.convert_version_to_int(
        self.RPC_API_VERSION)
def test_image_properties_filter_xen_arch_compat(self):
    # Legacy 'x86_32' architecture values must be treated as i686.
    img_props = objects.ImageMeta(
        properties=objects.ImageMetaProps(hw_architecture='x86_32'))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {
        'supported_instances': [(arch.I686, hv_type.KVM, vm_mode.HVM)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_xen_hv_type_compat(self):
    # Legacy 'xapi' hv_type values must be canonicalised to xen.
    img_props = objects.ImageMeta(
        properties=objects.ImageMetaProps(img_hv_type='xapi'))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {'supported_instances': [(arch.I686, hv_type.XEN,
                                     vm_mode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_xen_hv_type_compat(self):
    # An image declaring the old 'xapi' hv_type is accepted by a xen host.
    props = compute.ImageMetaProps(img_hv_type='xapi')
    spec_obj = compute.RequestSpec(image=compute.ImageMeta(properties=props))
    hv_version = versionutils.convert_version_to_int('6.0.0')
    caps = {
        'supported_instances': [(arch.I686, hv_type.XEN, vm_mode.HVM)],
        'hypervisor_version': hv_version,
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_pv_mode_compat(self):
    # Legacy 'pv' vm_mode values must be canonicalised to xen.
    img_props = objects.ImageMeta(
        properties=objects.ImageMetaProps(hw_vm_mode='pv'))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {
        'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_fails_partial_inst_props(self):
    # x86_64 matches but the host offers XEN mode, not the requested HVM.
    props = compute.ImageMetaProps(hw_architecture=arch.X86_64,
                                   hw_vm_mode=vm_mode.HVM)
    spec_obj = compute.RequestSpec(image=compute.ImageMeta(properties=props))
    caps = {'supported_instances': [(arch.X86_64, hv_type.XEN,
                                     vm_mode.XEN)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_fails_without_host_props(self):
    # Without 'supported_instances' capabilities, the host must fail.
    props = compute.ImageMetaProps(hw_architecture=arch.X86_64,
                                   img_hv_type=hv_type.KVM,
                                   hw_vm_mode=vm_mode.HVM)
    spec_obj = compute.RequestSpec(image=compute.ImageMeta(properties=props))
    caps = {'enabled': True,
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def fake_get_compute(context, host):
    """Produce a stub compute-node record for ``host``."""
    hyper_ver = versionutils.convert_version_to_int("1.0")
    service = dict(host=host,
                   binary="nova-compute",
                   topic="compute",
                   report_count=1,
                   updated_at="foo",
                   hypervisor_type="bar",
                   hypervisor_version=hyper_ver,
                   disabled=False)
    return {"compute_node": [service]}
def test_image_properties_filter_baremetal_vmmode_compat(self):
    # An old image declaring 'baremetal' for vm_mode counts as hvm.
    img_props = {'properties': {'vm_mode': 'baremetal'}}
    filter_properties = {'request_spec': {'image': img_props}}
    hv_version = versionutils.convert_version_to_int('6.0.0')
    caps = {'supported_instances': [(arch.I686, hv_type.BAREMETAL,
                                     vm_mode.HVM)],
            'hypervisor_version': hv_version}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def __init__(self, session, host_name, cluster, datastore_regex):
    """Capture session/cluster handles and cache the hypervisor identity."""
    super(VCState, self).__init__()
    self._session = session
    self._host_name = host_name
    self._cluster = cluster
    self._datastore_regex = datastore_regex
    self._stats = {}
    self._auto_service_disabled = False
    # Cache the hypervisor name/version once up front.
    about = self._session._call_method(vim_util, "get_about_info")
    self._hypervisor_type = about.name
    self._hypervisor_version = versionutils.convert_version_to_int(
        str(about.version))
    self.update_status()
def test_image_properties_filter_fails_different_hyper_version(self):
    # A 6.0.0 host cannot satisfy an image requiring >=6.2.
    img_props = {'properties': {'architecture': arch.X86_64,
                                'hypervisor_type': hv_type.KVM,
                                'vm_mode': vm_mode.HVM,
                                'hypervisor_version_requires': '>=6.2'}}
    filter_properties = {'request_spec': {'image': img_props}}
    caps = {
        'enabled': True,
        'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)],
        'hypervisor_version':
            versionutils.convert_version_to_int('6.0.0'),
    }
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def fake_get_compute(context, host):
    # TODO(stephenfin): It's gross that we even need this in a functional
    # test where we can control the running compute services. Stop doing it.
    service = {
        'host': host,
        'binary': 'nova-compute',
        'topic': 'compute',
        'report_count': 1,
        'updated_at': 'foo',
        'hypervisor_type': 'bar',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'disabled': False,
    }
    return {'compute_node': [service]}
def fake_get_compute(context, host):
    # TODO(stephenfin): It's gross that we even need this in a functional
    # test where we can control the running compute services. Stop doing it.
    hyper_ver = versionutils.convert_version_to_int('1.0')
    service = dict(host=host,
                   binary='nova-compute',
                   topic='compute',
                   report_count=1,
                   updated_at='foo',
                   hypervisor_type='bar',
                   hypervisor_version=hyper_ver,
                   disabled=False)
    return {'compute_node': [service]}
def test_image_properties_filter_fails_without_host_props(self):
    # Hosts lacking 'supported_instances' cannot satisfy image props.
    img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
        hw_architecture=obj_fields.Architecture.X86_64,
        img_hv_type=obj_fields.HVType.KVM,
        hw_vm_mode=obj_fields.VMMode.HVM))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {'enabled': True,
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_uses_default_conf_value(self):
    # With a configured default arch of x86_64, an aarch64-only host
    # must be rejected even though the image declares no properties.
    self.flags(image_properties_default_architecture='x86_64',
               group='filter_scheduler')
    spec_obj = objects.RequestSpec(
        image=objects.ImageMeta(properties=objects.ImageMetaProps()))
    caps = {'supported_instances': [(obj_fields.Architecture.AARCH64,
                                     obj_fields.HVType.KVM,
                                     obj_fields.VMMode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_hvm_mode_compat(self):
    # A legacy 'hv' vm_mode must be accepted by an HVM-capable host.
    img_props = objects.ImageMeta(
        properties=objects.ImageMetaProps(hw_vm_mode='hv'))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {'supported_instances': [(obj_fields.Architecture.X86_64,
                                     hv_type.KVM, vm_mode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_image_properties_filter_passes_same_inst_props_and_version(self):
    # Exact arch/hv/mode match and a hypervisor version inside the
    # image's requested range [6.0, 6.2) must pass.
    img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
        hw_architecture=arch.X86_64,
        img_hv_type=hv_type.KVM,
        hw_vm_mode=vm_mode.HVM,
        img_hv_requested_version='>=6.0,<6.2'))
    spec_obj = objects.RequestSpec(image=img_props)
    caps = {'supported_instances': [(arch.X86_64, hv_type.KVM,
                                     vm_mode.HVM)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def start_compute(self, hostname='compute1'):
    """Start a compute service backed by a fake libvirt connection.

    :param hostname: host name for the new compute service
    :returns: the started compute service
    """
    # Fake a host that meets the vTPM minimum libvirt/QEMU versions.
    libvirt_version = versionutils.convert_version_to_int(
        driver.MIN_LIBVIRT_VTPM)
    qemu_version = versionutils.convert_version_to_int(
        driver.MIN_QEMU_VTPM)
    fake_connection = self._get_connection(libvirt_version=libvirt_version,
                                           qemu_version=qemu_version,
                                           hostname=hostname)
    # This is fun. Firstly we need to do a global'ish mock so we can
    # actually start the service.
    with mock.patch(
        'nova.virt.libvirt.host.Host.get_connection',
        return_value=fake_connection,
    ):
        compute = self.start_service('compute', host=hostname)
        # Once that's done, we need to tweak the compute "service" to
        # make sure it returns unique objects. We do this inside the
        # mock context to avoid a small window between the end of the
        # context and the tweaking where get_connection would revert to
        # being an autospec mock.
        compute.driver._host.get_connection = lambda: fake_connection
    return compute
def test_image_properties_filter_fails_partial_inst_props(self):
    # The host supports XEN mode only, so an HVM request must fail.
    filter_properties = {
        'request_spec': {
            'image': {
                'properties': {
                    'architecture': arch.X86_64,
                    'vm_mode': vm_mode.HVM,
                },
            },
        },
    }
    caps = {'supported_instances': [(arch.X86_64, hv_type.XEN,
                                     vm_mode.XEN)],
            'hypervisor_version':
                versionutils.convert_version_to_int('6.0.0')}
    host = fakes.FakeHostState('host1', 'node1', caps)
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))