def build_instances(self, ctxt, **kwargs):
    """Build instances.

    Casts 'build_instances' to the peer, downgrading the RPC version
    (and converting object arguments to primitives) step by step until
    a version the peer can accept is reached.
    """
    build_inst_kwargs = kwargs
    instances = build_inst_kwargs['instances']
    build_inst_kwargs['image'] = jsonutils.to_primitive(
        build_inst_kwargs['image'])
    version = '1.34'
    if self.client.can_send_version('1.34'):
        # 1.34+ peers do not accept legacy_bdm; drop it if present.
        build_inst_kwargs.pop('legacy_bdm', None)
    else:
        # Older peers need the block device mapping as a primitive.
        bdm_p = objects_base.obj_to_primitive(
            build_inst_kwargs['block_device_mapping'])
        build_inst_kwargs['block_device_mapping'] = bdm_p
        version = '1.32'
    if not self.client.can_send_version('1.32'):
        # Pre-1.32 peers need primitive instances, not objects.
        instances_p = [jsonutils.to_primitive(inst) for inst in instances]
        build_inst_kwargs['instances'] = instances_p
        version = '1.30'
    if not self.client.can_send_version('1.30'):
        # Pre-1.30 peers need a primitive flavor inside
        # filter_properties.  NOTE(review): this mutates the caller's
        # filter_properties dict in place — confirm callers tolerate it.
        if 'filter_properties' in build_inst_kwargs:
            filter_properties = build_inst_kwargs['filter_properties']
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties['instance_type'] = flavor_p
        version = '1.8'
    cctxt = self.client.prepare(version=version)
    cctxt.cast(ctxt, 'build_instances',
               build_inst_kwargs=build_inst_kwargs)
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping,
                    legacy_bdm=True):
    """Cast 'build_instances', negotiating the RPC version downward and
    converting object arguments to primitives as required by older peers.
    """
    image_p = jsonutils.to_primitive(image)
    version = '1.10'
    if not self.client.can_send_version(version):
        version = '1.9'
        if 'instance_type' in filter_properties:
            # 1.9 peers expect a primitive flavor; build a shallow copy
            # so the caller's filter_properties is not mutated.
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties = dict(filter_properties,
                                     instance_type=flavor_p)
    kw = {'instances': instances, 'image': image_p,
          'filter_properties': filter_properties,
          'admin_password': admin_password,
          'injected_files': injected_files,
          'requested_networks': requested_networks,
          'security_groups': security_groups}
    if not self.client.can_send_version(version):
        version = '1.8'
        # Pre-1.9 peers expect requested_networks as plain tuples.
        kw['requested_networks'] = kw['requested_networks'].as_tuples()
    if not self.client.can_send_version('1.7'):
        version = '1.5'
        # Pre-1.7 peers also take the BDM and legacy_bdm flag.
        bdm_p = objects_base.obj_to_primitive(block_device_mapping)
        kw.update({'block_device_mapping': bdm_p,
                   'legacy_bdm': legacy_bdm})
    cctxt = self.client.prepare(version=version)
    cctxt.cast(context, 'build_instances', **kw)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True,
                   request_spec=None):
    """Call 'migrate_server', dropping newer arguments and converting
    objects to primitives for progressively older RPC versions.
    """
    kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
          'live': live, 'rebuild': rebuild, 'flavor': flavor,
          'block_migration': block_migration,
          'disk_over_commit': disk_over_commit,
          'reservations': reservations,
          'clean_shutdown': clean_shutdown,
          'request_spec': request_spec,
          }
    version = '1.13'
    if not self.client.can_send_version(version):
        # Pre-1.13 peers do not know about request_spec.
        del kw['request_spec']
        version = '1.11'
    if not self.client.can_send_version(version):
        # Pre-1.11 peers do not know about clean_shutdown.
        del kw['clean_shutdown']
        version = '1.10'
    if not self.client.can_send_version(version):
        # Pre-1.10 peers need a primitive flavor.
        kw['flavor'] = objects_base.obj_to_primitive(flavor)
        version = '1.6'
    if not self.client.can_send_version(version):
        # Pre-1.6 peers need a fully primitive instance.
        kw['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        version = '1.4'
    cctxt = self.client.prepare(version=version)
    return cctxt.call(context, 'migrate_server', **kw)
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)
    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)
        # NOTE(danms): This won't have extra_specs, so fill in the gaps
        _instance_type = objects.Flavor.get_by_flavor_id(
            ctxt, instance_type['flavorid'])
        # NOTE(review): mixes item access (['flavorid'], .get) with
        # attribute assignment (.extra_specs) on instance_type — assumes
        # extract_flavor returns something supporting both; confirm.
        instance_type.extra_specs = instance_type.get('extra_specs', {})
        instance_type.extra_specs.update(_instance_type.extra_specs)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)}
    # Recursively serialize (e.g. datetimes) for the RPC layer.
    return jsonutils.to_primitive(request_spec)
def _get_usage_dict(self, object_or_dict, **updates):
    """Make a usage dict _update methods expect.

    Accepts a dict or an Instance or Flavor object, and a set of
    updates.  Converts the object to a dict and applies the updates.

    :param object_or_dict: instance or flavor as an object or just a
        dict
    :param updates: key-value pairs to update the passed object.
        Currently only considers 'numa_topology', all other keys are
        ignored.
    :returns: a dict with all the information from object_or_dict
        updated with updates
    """
    usage = {}
    # Both object types take the identical conversion path, so test
    # them with a single isinstance call instead of duplicate branches.
    if isinstance(object_or_dict, (objects.Instance, objects.Flavor)):
        usage = obj_base.obj_to_primitive(object_or_dict)
    else:
        usage.update(object_or_dict)

    # Apply only the whitelisted update keys; everything else is
    # silently ignored by design.
    for key in ('numa_topology',):
        if key in updates:
            usage[key] = updates[key]
    return usage
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)
    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances),
        # NOTE(alaski): This should be removed as logic moves from the
        # scheduler to conductor. Provides backwards compatibility now.
        'instance_uuids': [inst['uuid'] for inst in instances]}
    # Recursively serialize (e.g. datetimes) for the RPC layer.
    return jsonutils.to_primitive(request_spec)
def build_request_spec(image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.

    :param image: optional primitive image meta dict
    :param instances: list of instances; objects will be converted to
        primitives
    :param instance_type: optional flavor; objects will be converted to
        primitives
    :return: dict with the following keys::

        'image': the image dict passed in or {}
        'instance_properties': primitive version of the first instance passed
        'instance_type': primitive version of the instance_type or None
        'num_instances': the number of instances passed in
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, obj_instance.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    if isinstance(instance, obj_instance.Instance):
        instance = obj_base.obj_to_primitive(instance)
        # obj_to_primitive doesn't copy this enough, so be sure
        # to detach our metadata blob because we modify it below.
        instance['system_metadata'] = dict(
            instance.get('system_metadata', {}))

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
        # NOTE(danms): Replicate this old behavior because the
        # scheduler RPC interface technically expects it to be
        # there. Remove this when we bump the scheduler RPC API to
        # v5.0
        try:
            flavors.save_flavor_info(
                instance.get('system_metadata', {}), instance_type)
        except KeyError:
            # If the flavor isn't complete (which is legit with a
            # flavor object), just don't put it in the request spec
            pass

    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)}
    # NOTE(mriedem): obj_to_primitive above does not serialize everything
    # in an object, like datetime fields, so we need to still call
    # to_primitive to recursively serialize the items in the request_spec
    # dict.
    return jsonutils.to_primitive(request_spec)
def finish_revert_resize(self, ctxt, instance, migration, host,
                         reservations=None):
    """Cast 'finish_revert_resize' to the given compute host.

    Newer peers (>= 2.47) accept objects directly; older ones need the
    instance and migration converted to primitives.
    """
    if not self.client.can_send_version('2.47'):
        version = '2.13'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
    else:
        version = '2.47'
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'finish_revert_resize', instance=instance,
               migration=migration, reservations=reservations)
def revert_resize(self, ctxt, instance, migration, host,
                  reservations=None):
    """Cast 'revert_resize' to the compute host (old-style rpc proxy)."""
    if self.can_send_version('2.39'):
        version = '2.39'
    else:
        # Pre-2.39 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.12'
    self.cast(ctxt, self.make_msg('revert_resize',
                                  instance=instance,
                                  migration=migration,
                                  reservations=reservations),
              topic=_compute_topic(self.topic, ctxt, host, instance),
              version=version)
def revert_resize(self, ctxt, instance, migration, host,
                  reservations=None):
    """Cast 'revert_resize' to the instance's compute host."""
    if self.client.can_send_version('2.39'):
        version = '2.39'
    else:
        # Pre-2.39 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.12'
    cctxt = self.client.prepare(server=_compute_host(host, instance),
                                version=_get_version(version))
    cctxt.cast(ctxt, 'revert_resize',
               instance=instance, migration=migration,
               reservations=reservations)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
                  host, reservations=None):
    """Cast 'finish_resize' to the given compute host."""
    if self.client.can_send_version('2.46'):
        version = '2.46'
    else:
        # Pre-2.46 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.8'
    cctxt = self.client.prepare(server=host,
                                version=_get_version(version))
    cctxt.cast(ctxt, 'finish_resize',
               instance=instance, migration=migration,
               image=image, disk_info=disk_info,
               reservations=reservations)
def _create_instances_here(self, ctxt, instance_uuids, instance_properties, instance_type, image, security_groups, block_device_mapping): instance_values = copy.copy(instance_properties) # The parent may pass these metadata values as lists, and the # create call expects it to be a dict. instance_values['metadata'] = utils.instance_meta(instance_values) sys_metadata = utils.instance_sys_meta(instance_values) # Make sure the flavor info is set. It may not have been passed # down. sys_metadata = flavors.save_flavor_info(sys_metadata, instance_type) instance_values['system_metadata'] = sys_metadata # Pop out things that will get set properly when re-creating the # instance record. instance_values.pop('id') instance_values.pop('name') instance_values.pop('info_cache') instance_values.pop('security_groups') num_instances = len(instance_uuids) for i, instance_uuid in enumerate(instance_uuids): instance = instance_obj.Instance() instance.update(instance_values) instance.uuid = instance_uuid instance = self.compute_api.create_db_entry_for_new_instance( ctxt, instance_type, image, instance, security_groups, block_device_mapping, num_instances, i) instance = obj_base.obj_to_primitive(instance) self.msg_runner.instance_update_at_top(ctxt, instance)
def test_check_can_live_migrate_source(self):
    """Verify check_can_live_migrate_source wires BDMs and the
    volume-backed flag through to the driver's source check.
    """
    is_volume_backed = 'volume_backed'
    bdms = 'bdms'
    dest_check_data = dict(foo='bar')
    db_instance = fake_instance.fake_db_instance()
    instance = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), db_instance)
    expected_dest_check_data = dict(dest_check_data,
                                    is_volume_backed=is_volume_backed)
    self.mox.StubOutWithMock(self.compute.conductor_api,
                             'block_device_mapping_get_all_by_instance')
    self.mox.StubOutWithMock(self.compute.compute_api,
                             'is_volume_backed_instance')
    self.mox.StubOutWithMock(self.compute.driver,
                             'check_can_live_migrate_source')
    # The conductor API must receive the primitive, not the object.
    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.conductor_api.block_device_mapping_get_all_by_instance(
        self.context, instance_p).AndReturn(bdms)
    self.compute.compute_api.is_volume_backed_instance(
        self.context, instance, bdms).AndReturn(is_volume_backed)
    self.compute.driver.check_can_live_migrate_source(
        self.context, instance, expected_dest_check_data)
    self.mox.ReplayAll()
    self.compute.check_can_live_migrate_source(
        self.context, instance=instance,
        dest_check_data=dest_check_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
                reservations=None, request_spec=None,
                filter_properties=None, node=None, clean_shutdown=True):
    """Cast 'prep_resize' to the destination compute host.

    4.0 peers need the flavor converted to a primitive; 4.1 peers take
    the object as-is.
    """
    msg_args = {
        'instance': instance,
        'instance_type': instance_type,
        'image': jsonutils.to_primitive(image),
        'reservations': reservations,
        'request_spec': request_spec,
        'filter_properties': filter_properties,
        'node': node,
        'clean_shutdown': clean_shutdown,
    }
    if self.client.can_send_version('4.1'):
        version = '4.1'
    else:
        version = '4.0'
        msg_args['instance_type'] = objects_base.obj_to_primitive(
            instance_type)
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'prep_resize', **msg_args)
def compat_instance(instance):
    """Create a dict-like instance structure from an objects.Instance.

    This is basically the same as nova.objects.base.obj_to_primitive(),
    except that it includes some instance-specific details, like stashing
    flavor information in system_metadata.

    If you have a function (or RPC client) that needs to see the instance
    as a dict that has flavor information in system_metadata, use this
    to appease it (while you fix said thing).

    :param instance: a nova.objects.Instance instance
    :returns: a dict-based instance structure
    """
    if not isinstance(instance, objects.Instance):
        # Already a primitive (or something else); pass through.
        return instance

    db_instance = copy.deepcopy(base.obj_to_primitive(instance))

    flavor_attrs = [('', 'flavor'), ('old_', 'old_flavor'),
                    ('new_', 'new_flavor')]
    for prefix, attr in flavor_attrs:
        # Use a conditional expression instead of the error-prone
        # "cond and x or None" idiom; behavior is identical because the
        # result is only used through the truthiness check below.
        flavor = (getattr(instance, attr)
                  if instance.obj_attr_is_set(attr) else None)
        if flavor:
            # NOTE(danms): If flavor is unset or None, don't
            # copy it into the primitive's system_metadata
            db_instance['system_metadata'] = flavors.save_flavor_info(
                db_instance.get('system_metadata', {}), flavor, prefix)
        if attr in db_instance:
            del db_instance[attr]

    return db_instance
def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
                             mock_get_by_binary):
    """Ensure host states are built for every ironic compute node and
    carry the expected service/stats/resource values.
    """
    mock_get_all.return_value = ironic_fakes.COMPUTE_NODES
    mock_get_by_binary.return_value = ironic_fakes.SERVICES
    context = 'fake_context'
    self.host_manager.get_all_host_states(context)
    # Ironic nodes should never trigger per-host instance lookups.
    self.assertEqual(0, mock_get_by_host.call_count)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node.host
        node = compute_node.hypervisor_hostname
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         obj_base.obj_to_primitive(
                             ironic_fakes.get_service_by_host(host)))
        self.assertEqual(compute_node.stats,
                         host_states_map[state_key].stats)
        self.assertEqual(compute_node.free_ram_mb,
                         host_states_map[state_key].free_ram_mb)
        # free_disk_gb is stored in MB on the host state.
        self.assertEqual(compute_node.free_disk_gb * 1024,
                         host_states_map[state_key].free_disk_mb)
def refresh_instance_security_rules(self, ctxt, host, instance):
    """Cast 'refresh_instance_security_rules' to the instance's host.

    Pre-4.4 peers need the instance converted to a primitive.
    """
    if self.client.can_send_version('4.4'):
        version = '4.4'
    else:
        version = '4.0'
        instance = objects_base.obj_to_primitive(instance)
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'refresh_instance_security_rules',
               instance=instance)
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)
    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)
        # NOTE(comstud): This is a bit ugly, but will get cleaned up when
        # we're passing an InstanceType internal object.
        extra_specs = db.flavor_extra_specs_get(ctxt,
                                                instance_type['flavorid'])
        instance_type['extra_specs'] = extra_specs
    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances),
        # NOTE(alaski): This should be removed as logic moves from the
        # scheduler to conductor. Provides backwards compatibility now.
        'instance_uuids': [inst['uuid'] for inst in instances]}
    # Recursively serialize (e.g. datetimes) for the RPC layer.
    return jsonutils.to_primitive(request_spec)
def _find_destination(self):
    """Pick a live-migration destination host via the scheduler,
    retrying (and excluding already-attempted hosts) until one passes
    the hypervisor-compatibility and live-migration checks.
    """
    # TODO(johngarbutt) this retry loop should be shared
    attempted_hosts = [self.source]
    image = None
    if self.instance.image_ref:
        image = compute_utils.get_image_metadata(self.context,
                                                 self.image_service,
                                                 self.instance.image_ref,
                                                 self.instance)
    # Scheduler expects a primitive instance in the request spec.
    instance_p = obj_base.obj_to_primitive(self.instance)
    request_spec = scheduler_utils.build_request_spec(self.context,
                                                      image,
                                                      [instance_p])
    host = None
    while host is None:
        self._check_not_over_max_retries(attempted_hosts)
        filter_properties = {'ignore_hosts': attempted_hosts}
        host = self.scheduler_rpcapi.select_hosts(self.context,
                                                  request_spec,
                                                  filter_properties)[0]
        try:
            self._check_compatible_with_source_hypervisor(host)
            self._call_livem_checks_on_host(host)
        except exception.Invalid as e:
            # Host failed a check; exclude it and ask the scheduler again.
            LOG.debug(_("Skipping host: %(host)s because: %(e)s") %
                      {"host": host, "e": e})
            attempted_hosts.append(host)
            host = None
    return host
def confirm_resize(self, ctxt, instance, migration, host,
                   reservations=None, cast=True):
    """Confirm a resize on the given host; cast or call per 'cast'."""
    if self.client.can_send_version('2.39'):
        version = '2.39'
    else:
        # Pre-2.39 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.7'
    cctxt = self.client.prepare(server=_compute_host(host, instance),
                                version=_get_version(version))
    rpc_method = cctxt.cast if cast else cctxt.call
    return rpc_method(ctxt, 'confirm_resize',
                      instance=instance, migration=migration,
                      reservations=reservations)
def _update_usage_from_instance(self, context, instance,
                                is_removed=False):
    """Update usage for a single instance."""
    uuid = instance['uuid']
    is_new_instance = uuid not in self.tracked_instances
    # Instances in a 'removable' vm_state count as removed too.
    is_removed_instance = (
        is_removed or
        instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
    if is_new_instance:
        self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
        sign = 1
    if is_removed_instance:
        self.tracked_instances.pop(uuid)
        sign = -1
    self.stats.update_stats_for_instance(instance, is_removed_instance)
    # if it's a new or deleted instance:
    if is_new_instance or is_removed_instance:
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_instance(context, instance,
                                                     sign=sign)
        # new instance, update compute node resource usage:
        self._update_usage(instance, sign=sign)
    self.compute_node.current_workload = self.stats.calculate_workload()
    if self.pci_tracker:
        obj = self.pci_tracker.stats.to_device_pools_obj()
        self.compute_node.pci_device_pools = obj
    else:
        self.compute_node.pci_device_pools = objects.PciDevicePoolList()
def set_network_host(self, ctxt, network_ref):
    """Call 'set_network_host' for the given network.

    Pre-1.15 peers only accept a primitive network ref.
    """
    if self.client.can_send_version('1.15'):
        version = '1.15'
    else:
        version = '1.0'
        network_ref = objects_base.obj_to_primitive(network_ref)
    cctxt = self.client.prepare(version=version)
    return cctxt.call(ctxt, 'set_network_host', network_ref=network_ref)
def confirm_resize(self, ctxt, instance, migration, host,
                   reservations=None, cast=True):
    """Confirm a resize (old-style rpc proxy); cast or call per 'cast'."""
    rpc_method = self.cast if cast else self.call
    if self.can_send_version('2.39'):
        version = '2.39'
    else:
        # Pre-2.39 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.7'
    return rpc_method(ctxt,
                      self.make_msg('confirm_resize',
                                    instance=instance,
                                    migration=migration,
                                    reservations=reservations),
                      topic=_compute_topic(self.topic, ctxt, host,
                                           instance),
                      version=version)
def build_and_run_instance(self, ctxt, instance, host, image,
                           request_spec, filter_properties,
                           admin_password=None, injected_files=None,
                           requested_networks=None, security_groups=None,
                           block_device_mapping=None, node=None,
                           limits=None):
    """Cast 'build_and_run_instance' to the target host, downgrading
    the RPC version and argument forms for older peers.
    """
    version = '3.36'
    if not self.client.can_send_version(version):
        version = '3.33'
        if 'instance_type' in filter_properties:
            # Pre-3.36 peers need a primitive flavor; build a shallow
            # copy so the caller's filter_properties is not mutated.
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties = dict(filter_properties,
                                     instance_type=flavor_p)
    if not self.client.can_send_version(version):
        version = '3.23'
        if requested_networks is not None:
            # Pre-3.33 peers expect plain 3-tuples rather than the
            # NetworkRequestList object.
            requested_networks = [(network_id, address, port_id)
                                  for (network_id, address, port_id, _)
                                  in requested_networks.as_tuples()]
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
               image=image, request_spec=request_spec,
               filter_properties=filter_properties,
               admin_password=admin_password,
               injected_files=injected_files,
               requested_networks=requested_networks,
               security_groups=security_groups,
               block_device_mapping=block_device_mapping, node=node,
               limits=limits)
def _update_usage_from_instance(self, context, resources, instance):
    """Update usage for a single instance."""
    uuid = instance['uuid']
    is_new_instance = uuid not in self.tracked_instances
    is_deleted_instance = instance['vm_state'] == vm_states.DELETED
    if is_new_instance:
        self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
        sign = 1
    if is_deleted_instance:
        self.tracked_instances.pop(uuid)
        sign = -1
    self.stats.update_stats_for_instance(instance)
    if self.pci_tracker:
        self.pci_tracker.update_pci_for_instance(context, instance)
    # if it's a new or deleted instance:
    if is_new_instance or is_deleted_instance:
        # new instance, update compute node resource usage:
        self._update_usage(context, resources, instance, sign=sign)
    resources['current_workload'] = self.stats.calculate_workload()
    if self.pci_tracker:
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        resources['pci_stats'] = jsonutils.dumps([])
def instance_destroy(self, context, instance):
    """Destroy an instance and return its primitive form.

    Accepts either an objects.Instance or a raw DB record; a raw
    record is hydrated into an object first.
    """
    if isinstance(instance, objects.Instance):
        inst_obj = instance
    else:
        inst_obj = objects.Instance._from_db_object(
            context, objects.Instance(), instance)
    inst_obj.destroy()
    return nova_object.obj_to_primitive(inst_obj)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                     image_ref, orig_image_ref, orig_sys_metadata, bdms,
                     recreate=False, on_shared_storage=False, host=None,
                     preserve_ephemeral=False, kwargs=None):
    """Cast 'rebuild_instance' to the compute host, negotiating the RPC
    version and argument forms down for older peers.
    """
    # NOTE(danms): kwargs is only here for cells compatibility, don't
    # actually send it to compute
    extra = {'preserve_ephemeral': preserve_ephemeral}
    if self.client.can_send_version('3.21'):
        version = '3.21'
    else:
        # Older peers need legacy-format primitive BDMs.
        bdms = block_device.legacy_mapping(bdms)
        bdms = jsonutils.to_primitive(objects_base.obj_to_primitive(bdms))
        if self.client.can_send_version('3.5'):
            version = '3.5'
        elif self.client.can_send_version('3.4'):
            version = '3.4'
            # 3.4 peers do not accept preserve_ephemeral.
            extra = {}
        else:
            # NOTE(russellb) Havana compat
            version = self._get_compat_version('3.0', '2.22')
            instance = jsonutils.to_primitive(instance)
            extra = {}
    cctxt = self.client.prepare(server=_compute_host(host, instance),
                                version=version)
    cctxt.cast(ctxt, 'rebuild_instance',
               instance=instance, new_pass=new_pass,
               injected_files=injected_files, image_ref=image_ref,
               orig_image_ref=orig_image_ref,
               orig_sys_metadata=orig_sys_metadata, bdms=bdms,
               recreate=recreate, on_shared_storage=on_shared_storage,
               **extra)
def _test_extract_flavor(self, prefix):
    """Round-trip a flavor through save_flavor_info/extract_flavor and
    verify the stored properties survive, for the given key prefix.
    """
    instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
    instance_type_p = obj_base.obj_to_primitive(instance_type)
    metadata = {}
    flavors.save_flavor_info(metadata, instance_type, prefix)
    instance = {'system_metadata': self._dict_to_metadata(metadata)}
    _instance_type = flavors.extract_flavor(instance, prefix)
    _instance_type_p = obj_base.obj_to_primitive(_instance_type)
    props = flavors.system_metadata_flavor_props.keys()
    # Only the properties persisted to system_metadata can round-trip,
    # so strip everything else before comparing.
    for key in list(instance_type_p.keys()):
        if key not in props:
            del instance_type_p[key]
    self.assertEqual(instance_type_p, _instance_type_p)
def test_get_all_host_states(self, mock_gbh):
    """Ensure .service is set and the host states carry the values we
    expect for every ironic compute node.
    """
    context = "fake_context"
    self.mox.StubOutWithMock(objects.ServiceList, "get_by_binary")
    self.mox.StubOutWithMock(objects.ComputeNodeList, "get_all")
    objects.ServiceList.get_by_binary(
        context, "nova-compute").AndReturn(ironic_fakes.SERVICES)
    objects.ComputeNodeList.get_all(context).AndReturn(
        ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()
    self.host_manager.get_all_host_states(context)
    # Ironic nodes should never trigger per-host instance lookups.
    self.assertEqual(0, mock_gbh.call_count)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node.host
        node = compute_node.hypervisor_hostname
        state_key = (host, node)
        self.assertEqual(
            host_states_map[state_key].service,
            obj_base.obj_to_primitive(
                ironic_fakes.get_service_by_host(host)))
        self.assertEqual(compute_node.stats,
                         host_states_map[state_key].stats)
        self.assertEqual(compute_node.free_ram_mb,
                         host_states_map[state_key].free_ram_mb)
        # free_disk_gb is stored in MB on the host state.
        self.assertEqual(compute_node.free_disk_gb * 1024,
                         host_states_map[state_key].free_disk_mb)
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, objects.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)
    if isinstance(instance, objects.Instance):
        # compat_instance stashes flavor info into system_metadata for
        # consumers that still expect the legacy dict format.
        instance = instance_obj.compat_instance(instance)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)}
    # Recursively serialize (e.g. datetimes) for the RPC layer.
    return jsonutils.to_primitive(request_spec)
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
    """A NUMA-capable host must accept an instance with no NUMA
    topology requirements.
    """
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = None
    filter_properties = {
        'request_spec': {
            'instance_properties': jsonutils.to_primitive(
                obj_base.obj_to_primitive(instance))
        }
    }
    host = fakes.FakeHostState('host1', 'node1',
                               {'numa_topology': fakes.NUMA_TOPOLOGY})
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def volume_snapshot_create(self, ctxt, instance, volume_id, create_info):
    """Cast 'volume_snapshot_create' to the instance's compute host."""
    if self.client.can_send_version('3.6'):
        version = '3.6'
    else:
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.44')
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
               volume_id=volume_id, create_info=create_info)
def test_obj_to_primitive_with_ip_addr(self):
    """IP address/network fields must serialize to their string forms."""
    class TestObject(base.NovaObject):
        fields = {'addr': fields.IPAddressField(),
                  'cidr': fields.IPNetworkField()}

    obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
    expected = {'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'}
    self.assertEqual(expected, base.obj_to_primitive(obj))
def test_obj_to_primitive_recursive(self):
    """obj_to_primitive must recurse into list-of-objects fields."""
    class MyList(base.ObjectListBase, base.NovaObject):
        fields = {'objects': fields.ListOfObjectsField('MyObj')}

    mylist = MyList(objects=[MyObj(), MyObj()])
    for i, value in enumerate(mylist):
        value.foo = i
    self.assertEqual([{'foo': 0}, {'foo': 1}],
                     base.obj_to_primitive(mylist))
def build_and_run_instance(self, ctxt, instance, host, image,
                           request_spec, filter_properties,
                           admin_password=None, injected_files=None,
                           requested_networks=None, security_groups=None,
                           block_device_mapping=None, node=None,
                           limits=None):
    """Cast 'build_and_run_instance' to the target host, downgrading
    the RPC version and argument forms for older peers.
    """
    version = '4.0'
    if not self.client.can_send_version(version):
        version = '3.40'
    if not self.client.can_send_version(version):
        version = '3.36'
        # FIX: 'limits' defaults to None, so test it before the 'in'
        # check — '"numa_topology" in None' raises TypeError on this
        # old-version path.
        if limits and 'numa_topology' in limits and \
                limits['numa_topology']:
            topology_limits = limits['numa_topology']
            if node is not None:
                cnode = objects.ComputeNode.get_by_host_and_nodename(
                    ctxt, host, node)
            else:
                cnode = (
                    objects.ComputeNode.
                    get_first_node_by_host_for_old_compat(
                        ctxt, host))
            host_topology = objects.NUMATopology.obj_from_db_obj(
                cnode.numa_topology)
            # Pre-3.40 peers expect the NUMA limits as a legacy JSON
            # blob built against the host's topology.
            limits['numa_topology'] = jsonutils.dumps(
                topology_limits.to_dict_legacy(host_topology))
    if not self.client.can_send_version(version):
        version = '3.33'
        if 'instance_type' in filter_properties:
            # Pre-3.36 peers need a primitive flavor; build a shallow
            # copy so the caller's filter_properties is not mutated.
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties = dict(filter_properties,
                                     instance_type=flavor_p)
    if not self.client.can_send_version(version):
        version = '3.23'
        if requested_networks is not None:
            # Pre-3.33 peers expect tuples; neutron includes port_id,
            # nova-network does not.
            if utils.is_neutron():
                requested_networks = [
                    (network_id, address, port_id)
                    for (network_id, address, port_id, _)
                    in requested_networks.as_tuples()]
            else:
                requested_networks = [
                    (network_id, address)
                    for (network_id, address)
                    in requested_networks.as_tuples()]
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
               image=image, request_spec=request_spec,
               filter_properties=filter_properties,
               admin_password=admin_password,
               injected_files=injected_files,
               requested_networks=requested_networks,
               security_groups=security_groups,
               block_device_mapping=block_device_mapping, node=node,
               limits=limits)
def revert_resize(self, ctxt, instance, migration, host,
                  reservations=None):
    """Cast 'revert_resize' to the instance's compute host."""
    if self.client.can_send_version('2.39'):
        version = '2.39'
    else:
        # Pre-2.39 peers need primitive instance/migration.
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
        version = '2.12'
    cctxt = self.client.prepare(server=_compute_host(host, instance),
                                version=version)
    cctxt.cast(ctxt, 'revert_resize',
               instance=instance, migration=migration,
               reservations=reservations)
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, obj_instance.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    if isinstance(instance, obj_instance.Instance):
        instance = obj_base.obj_to_primitive(instance)
        # obj_to_primitive doesn't copy this enough, so be sure
        # to detach our metadata blob because we modify it below.
        instance['system_metadata'] = dict(
            instance.get('system_metadata', {}))

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
        # NOTE(danms): Replicate this old behavior because the
        # scheduler RPC interface technically expects it to be
        # there. Remove this when we bump the scheduler RPC API to
        # v5.0
        try:
            flavors.save_flavor_info(
                instance.get('system_metadata', {}), instance_type)
        except KeyError:
            # If the flavor isn't complete (which is legit with a
            # flavor object), just don't put it in the request spec
            pass

    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)
    }
    # Recursively serialize (e.g. datetimes) for the RPC layer.
    return jsonutils.to_primitive(request_spec)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True,
                   request_spec=None):
    """Call 'migrate_server', dropping newer arguments and converting
    objects to primitives for progressively older RPC versions.
    """
    kw = {
        'instance': instance,
        'scheduler_hint': scheduler_hint,
        'live': live,
        'rebuild': rebuild,
        'flavor': flavor,
        'block_migration': block_migration,
        'disk_over_commit': disk_over_commit,
        'reservations': reservations,
        'clean_shutdown': clean_shutdown,
        'request_spec': request_spec,
    }
    version = '1.13'
    if not self.client.can_send_version(version):
        # Pre-1.13 peers do not know about request_spec.
        del kw['request_spec']
        version = '1.11'
    if not self.client.can_send_version(version):
        # Pre-1.11 peers do not know about clean_shutdown.
        del kw['clean_shutdown']
        version = '1.10'
    if not self.client.can_send_version(version):
        # Pre-1.10 peers need a primitive flavor.
        kw['flavor'] = objects_base.obj_to_primitive(flavor)
        version = '1.6'
    if not self.client.can_send_version(version):
        # Pre-1.6 peers need a fully primitive instance.
        kw['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        version = '1.4'
    cctxt = self.client.prepare(version=version)
    return cctxt.call(context, 'migrate_server', **kw)
def _output(self, req, migrations_obj, add_link=False, add_uuid=False,
            add_user_project=False):
    """Returns the desired output of the API from an object.

    From a MigrationsList's object this method returns a list of
    primitive objects with the only necessary fields.
    """
    detail_keys = ['memory_total', 'memory_processed',
                   'memory_remaining', 'disk_total', 'disk_processed',
                   'disk_remaining']
    # TODO(Shaohe Feng) we should share the in-progress list.
    live_migration_in_progress = ['queued', 'preparing', 'running',
                                  'post-migrating']
    # Note(Shaohe Feng): We need to leverage the oslo.versionedobjects.
    # Then we can pass the target version to it's obj_to_primitive.
    objects = obj_base.obj_to_primitive(migrations_obj)
    # Hidden migrations are internal and never exposed via the API.
    objects = [x for x in objects if not x['hidden']]
    for obj in objects:
        # Strip internal bookkeeping fields from the API response.
        del obj['deleted']
        del obj['deleted_at']
        del obj['hidden']
        del obj['cross_cell_move']
        if not add_uuid:
            del obj['uuid']
        if 'memory_total' in obj:
            # Live-migration progress details are only shown on
            # microversions that request them.
            for key in detail_keys:
                del obj[key]
        if not add_user_project:
            if 'user_id' in obj:
                del obj['user_id']
            if 'project_id' in obj:
                del obj['project_id']
        # NOTE(Shaohe Feng) above version 2.23, add migration_type for all
        # kinds of migration, but we only add links just for in-progress
        # live-migration.
        if add_link and obj['migration_type'] == "live-migration" and (
                obj["status"] in live_migration_in_progress):
            obj["links"] = self._view_builder._get_links(
                req, obj["id"],
                self._collection_name % obj['instance_uuid'])
        elif add_link is False:
            del obj['migration_type']
    return objects
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
    """Cast terminate_instance to the instance's compute host.

    Falls back to pre-3.22 semantics, which require the BDMs to be
    converted to the legacy primitive format.
    """
    # NOTE(russellb) Havana compat
    if self.client.can_send_version('3.22'):
        version = '3.22'
    else:
        version = self._get_compat_version('3.0', '2.35')
        bdms = jsonutils.to_primitive(
            objects_base.obj_to_primitive(
                block_device.legacy_mapping(bdms)))
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'terminate_instance',
               instance=instance, bdms=bdms, reservations=reservations)
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
    """An instance without a NUMA topology passes on a NUMA-aware host."""
    instance = fake_instance.fake_instance_obj(self.context)
    instance.numa_topology = None
    spec = jsonutils.to_primitive(obj_base.obj_to_primitive(instance))
    filter_properties = {'request_spec': {'instance_properties': spec}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'numa_topology': fakes.NUMA_TOPOLOGY})
    filt_cls = self.class_map['NUMATopologyFilter']()
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
    """Call the source compute host to verify it can live-migrate.

    Peers older than 2.38 need the instance as a json primitive.
    """
    if self.client.can_send_version('2.38'):
        version = '2.38'
    else:
        version = '2.0'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    return cctxt.call(ctxt, 'check_can_live_migrate_source',
                      instance=instance,
                      dest_check_data=dest_check_data)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
    """Ask the source host whether the live migration can proceed.

    Peers older than 2.38 need the instance as a json primitive.
    """
    if self.can_send_version('2.38'):
        version = '2.38'
    else:
        version = '2.0'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    msg = self.make_msg('check_can_live_migrate_source',
                        instance=instance,
                        dest_check_data=dest_check_data)
    return self.call(ctxt, msg,
                     topic=_compute_topic(self.topic, ctxt, None, instance),
                     version=version)
def test_obj_to_primitive_recursive(self):
    """obj_to_primitive recurses into the objects of an object list."""
    class MyList(base.ObjectListBase, base.NovaObject):
        pass

    mylist = MyList()
    mylist.objects = [MyObj(), MyObj()]
    for index, item in enumerate(mylist):
        item.foo = index
    expected = [{'foo': 0}, {'foo': 1}]
    self.assertEqual(expected, base.obj_to_primitive(mylist))
def _get_candidate_destination(self, image, instance_type, attempted_hosts):
    """Ask the scheduler for a destination host, ignoring hosts that
    have already been attempted.
    """
    request_spec = {
        'instance_properties': obj_base.obj_to_primitive(self.instance),
        'instance_type': instance_type,
        'instance_uuids': [self.instance.uuid],
    }
    if image:
        request_spec['image'] = image
    filter_properties = {'ignore_hosts': attempted_hosts}
    hosts = self.scheduler_rpcapi.select_hosts(self.context, request_spec,
                                               filter_properties)
    return hosts[0]
def test_send_on_task_change(self):
    """A task_state transition emits one legacy and one versioned
    instance.update notification.
    """
    old = obj_base.obj_to_primitive(self.instance)
    old['task_state'] = None
    # pretend we just transitioned to task SPAWNING:
    self.instance.task_state = task_states.SPAWNING
    notifications.send_update(self.context, old, self.instance)
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    versioned = fake_notifier.VERSIONED_NOTIFICATIONS
    self.assertEqual(1, len(versioned))
    self.assertEqual('instance.update', versioned[0]['event_type'])
def check_can_live_migrate_destination(self, ctxt, instance, destination,
                                       block_migration, disk_over_commit):
    """Call the destination host to validate a live migration.

    Peers older than 2.38 need the instance as a json primitive.
    """
    if self.client.can_send_version('2.38'):
        version = '2.38'
    else:
        version = '2.0'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    cctxt = self.client.prepare(server=destination, version=version)
    return cctxt.call(ctxt, 'check_can_live_migrate_destination',
                      instance=instance,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping, legacy_bdm=True,
                    request_spec=None, host_lists=None):
    """Cast build_instances to the conductor, downgrading the payload to
    the newest RPC version the peer can accept.

    The ``can_send_version`` checks cascade: once a downgrade is taken,
    each subsequent check probes a progressively older version.
    """
    image_p = jsonutils.to_primitive(image)
    kwargs = {"instances": instances,
              "image": image_p,
              "filter_properties": filter_properties,
              "admin_password": admin_password,
              "injected_files": injected_files,
              "requested_networks": requested_networks,
              "security_groups": security_groups,
              "request_spec": request_spec,
              "host_lists": host_lists}
    version = '1.19'
    if not self.client.can_send_version(version):
        # 1.18 predates host_lists.
        version = '1.18'
        kwargs.pop("host_lists")
    if not self.client.can_send_version(version):
        # 1.10 predates request_spec.
        version = '1.10'
        kwargs.pop("request_spec")
    if not self.client.can_send_version(version):
        # 1.9 needs the flavor inside filter_properties as a primitive;
        # a shallow copy avoids mutating the caller's dict.
        version = '1.9'
        if 'instance_type' in filter_properties:
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            kwargs["filter_properties"] = dict(filter_properties,
                                               instance_type=flavor_p)
    if not self.client.can_send_version(version):
        # 1.8 expects requested_networks as tuples, not an object.
        version = '1.8'
        nets = kwargs['requested_networks'].as_tuples()
        kwargs['requested_networks'] = nets
    if not self.client.can_send_version('1.7'):
        # 1.5 predates BDM objects; send primitives plus the legacy flag.
        version = '1.5'
        bdm_p = objects_base.obj_to_primitive(block_device_mapping)
        kwargs.update({'block_device_mapping': bdm_p,
                       'legacy_bdm': legacy_bdm})
    cctxt = self.client.prepare(version=version)
    cctxt.cast(context, 'build_instances', **kwargs)
def resize_claim(self, context, instance, instance_type, limits=None):
    """Indicate that resources are needed for a resize operation to this
    compute host.
    :param context: security context
    :param instance: instance object to reserve resources for
    :param instance_type: new instance_type being resized to
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs.
    :returns: A Claim ticket representing the reserved resources.  This
              should be turned into finalize a resource claim or free
              resources after the compute operation is finished.
    :raises: exception.ComputeResourcesUnavailable if the claim cannot
             fit within the given limits.
    """
    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        migration = self._create_migration(context, instance,
                                           instance_type)
        return claims.NopClaim(migration=migration)
    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(instance_type)
    LOG.debug(
        _("Memory overhead for %(flavor)d MB instance; %(overhead)d "
          "MB"), {
            'flavor': instance_type['memory_mb'],
            'overhead': overhead['memory_mb']
        })
    instance_ref = obj_base.obj_to_primitive(instance)
    claim = claims.ResizeClaim(instance_ref, instance_type, self,
                               overhead=overhead)
    # Only record the migration and account for its usage once the
    # claim fits within the oversubscription limits.
    if claim.test(self.compute_node, limits):
        migration = self._create_migration(context, instance_ref,
                                           instance_type)
        claim.migration = migration
        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance_ref,
                                          self.compute_node, migration)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)
        return claim
    else:
        raise exception.ComputeResourcesUnavailable()
def confirm_resize(self, ctxt, instance, migration, host,
                   reservations=None, cast=True):
    """Confirm a resize on the given host, via cast or blocking call.

    Peers older than 2.39 need instance and migration as json
    primitives.
    """
    if self.client.can_send_version('2.39'):
        version = '2.39'
    else:
        version = '2.7'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        migration = jsonutils.to_primitive(
            objects_base.obj_to_primitive(migration))
    cctxt = self.client.prepare(server=_compute_host(host, instance),
                                version=version)
    rpc_method = cctxt.cast if cast else cctxt.call
    return rpc_method(ctxt, 'confirm_resize',
                      instance=instance, migration=migration,
                      reservations=reservations)
def reboot_instance(self, ctxt, instance, block_device_info, reboot_type):
    """Cast reboot_instance to the instance's compute host.

    Peers older than 2.32 need the instance as a json primitive.
    """
    if self.can_send_version('2.32'):
        version = '2.32'
    else:
        version = '2.23'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    msg = self.make_msg('reboot_instance',
                        instance=instance,
                        block_device_info=block_device_info,
                        reboot_type=reboot_type)
    self.cast(ctxt, msg,
              topic=_compute_topic(self.topic, ctxt, None, instance),
              version=version)
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
    """An instance with a NUMA topology fails on a host without one."""
    cells = [
        objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
        objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512),
    ]
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = objects.InstanceNUMATopology(cells=cells)
    spec = jsonutils.to_primitive(obj_base.obj_to_primitive(instance))
    filter_properties = {'request_spec': {'instance_properties': spec}}
    host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def output(migrations_obj):
    """Returns the desired output of the API from an object.

    From a MigrationsList's object this method returns a list of
    primitive objects with the only necessary fields.
    """
    internal_fields = ('deleted', 'deleted_at', 'migration_type', 'hidden')
    visible = [item for item in obj_base.obj_to_primitive(migrations_obj)
               if not item['hidden']]
    for migration in visible:
        for field in internal_fields:
            del migration[field]
    return visible
def reboot_instance(self, ctxt, instance, block_device_info, reboot_type):
    """Cast reboot_instance to the instance's compute host.

    Peers older than 2.32 need the instance as a json primitive.
    """
    if self.client.can_send_version('2.32'):
        version = '2.32'
    else:
        version = '2.23'
        instance = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'reboot_instance',
               instance=instance,
               block_device_info=block_device_info,
               reboot_type=reboot_type)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True, request_spec=None,
                   host_list=None, do_cast=False):
    """Send migrate_server to the conductor, downgrading the payload to
    the newest RPC version the peer can accept.

    The ``can_send_version`` checks cascade: once a downgrade is taken,
    each subsequent check probes a progressively older version.

    :param do_cast: when True, fire-and-forget the request; otherwise
        block on the call and return its result.
    """
    kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
          'live': live, 'rebuild': rebuild, 'flavor': flavor,
          'block_migration': block_migration,
          'disk_over_commit': disk_over_commit,
          'reservations': reservations,
          'clean_shutdown': clean_shutdown,
          'request_spec': request_spec,
          'host_list': host_list,
          }
    version = '1.20'
    if not self.client.can_send_version(version):
        # 1.13 predates host_list.
        del kw['host_list']
        version = '1.13'
    if not self.client.can_send_version(version):
        # 1.11 predates request_spec.
        del kw['request_spec']
        version = '1.11'
    if not self.client.can_send_version(version):
        # 1.10 predates clean_shutdown.
        del kw['clean_shutdown']
        version = '1.10'
    if not self.client.can_send_version(version):
        # 1.6 expects the flavor as a primitive, not an object.
        kw['flavor'] = objects_base.obj_to_primitive(flavor)
        version = '1.6'
    if not self.client.can_send_version(version):
        # 1.4 expects a fully primitive instance.
        kw['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        version = '1.4'
    # Migration can be long-running, so use the long RPC timeout with a
    # call monitor heartbeat.
    cctxt = self.client.prepare(
        version=version,
        call_monitor_timeout=CONF.rpc_response_timeout,
        timeout=CONF.long_rpc_timeout)
    if do_cast:
        return cctxt.cast(context, 'migrate_server', **kw)
    return cctxt.call(context, 'migrate_server', **kw)
def test_obj_to_primitive_list(self):
    """obj_to_primitive converts every element of an object list."""
    class MyObjElement(base.NovaObject):
        fields = {'foo': fields.IntegerField()}

        def __init__(self, foo):
            super(MyObjElement, self).__init__()
            self.foo = foo

    class MyList(base.ObjectListBase, base.NovaObject):
        fields = {'objects': fields.ListOfObjectsField('MyObjElement')}

    mylist = MyList()
    mylist.objects = [MyObjElement(n) for n in (1, 2, 3)]
    primitives = base.obj_to_primitive(mylist)
    self.assertEqual([1, 2, 3], [item['foo'] for item in primitives])
def build_instances(self, ctxt, **kwargs):
    """Build instances."""
    build_inst_kwargs = kwargs
    # Downgrade the payload to primitives before casting.
    build_inst_kwargs['instances'] = [
        jsonutils.to_primitive(inst)
        for inst in build_inst_kwargs['instances']]
    build_inst_kwargs['image'] = jsonutils.to_primitive(
        build_inst_kwargs['image'])
    if 'filter_properties' in build_inst_kwargs:
        filter_properties = build_inst_kwargs['filter_properties']
        filter_properties['instance_type'] = objects_base.obj_to_primitive(
            filter_properties['instance_type'])
    cctxt = self.client.prepare(version='1.8')
    cctxt.cast(ctxt, 'build_instances',
               build_inst_kwargs=build_inst_kwargs)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
                    reservations=None, clean_shutdown=True):
    """Cast resize_instance to the instance's compute host.

    Peers older than 4.1 need instance_type as a primitive.
    """
    msg_args = {
        'instance': instance,
        'migration': migration,
        'image': image,
        'reservations': reservations,
        'instance_type': instance_type,
        'clean_shutdown': clean_shutdown,
    }
    if self.client.can_send_version('4.1'):
        version = '4.1'
    else:
        version = '4.0'
        msg_args['instance_type'] = objects_base.obj_to_primitive(
            instance_type)
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'resize_instance', **msg_args)
def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):
    """Cast attach_volume to the instance's compute host.

    Pre-3.16 peers take a primitive instance and no bdm argument.
    """
    # NOTE(ndipanov): Remove volume_id and mountpoint on the next major
    # version bump - they are not needed when using bdm objects.
    kw = {'instance': instance,
          'volume_id': volume_id,
          'mountpoint': mountpoint,
          'bdm': bdm}
    if self.client.can_send_version('3.16'):
        version = '3.16'
    else:
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        kw['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        kw.pop('bdm')
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'attach_volume', **kw)
def _fake_migration_get_in_progress_by_host_and_node(
        self, ctxt, host, node):
    """Return primitives for this fake's migrations still in progress,
    each with its matching instance attached.
    """
    finished_statuses = ('confirmed', 'reverted', 'error')
    in_progress = []
    for migration_obj in self._migrations.values():
        migration = obj_base.obj_to_primitive(migration_obj)
        if migration['status'] in finished_statuses:
            continue
        instance_uuid = migration['instance_uuid']
        migration['instance'] = self._instances[instance_uuid]
        in_progress.append(migration)
    return in_progress