def test_dicts_host_json(self):
    """A JSON-serialized host topology produces a JSON string result."""
    host_dict = {'numa_topology': self.hosttopo.to_json()}
    instance_dict = {'numa_topology': self.instancetopo}
    usage = hw.get_host_numa_usage_from_instance(host_dict, instance_dict)
    # Serialized host in => serialized usage out.
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def test_object_host_instance(self):
    """A ComputeNode object host also yields a serialized usage string."""
    compute_node = objects.ComputeNode(
        numa_topology=self.hosttopo.to_json())
    instance_dict = {'numa_topology': self.instancetopo}
    usage = hw.get_host_numa_usage_from_instance(compute_node,
                                                 instance_dict)
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def test_dicts_instance_json(self):
    """An unserialized host topology produces an object result."""
    host_dict = {'numa_topology': self.hosttopo}
    instance_dict = {'numa_topology': self.instancetopo.to_json()}
    usage = hw.get_host_numa_usage_from_instance(host_dict, instance_dict)
    # Host came in as an object, so no serialization happens.
    self.assertIsInstance(usage, hw.VirtNUMAHostTopology)
    self._check_usage(usage)
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts the instance's RAM, disk and vCPU demand from the host's
    free pools, applies PCI requests, recomputes host NUMA usage and
    bumps the I/O-operation counter for disk-heavy states.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    self.updated = timeutils.utcnow()
    # Track number of instances on host
    self.num_instances += 1
    # NOTE(review): attribute access here assumes pci_requests is an
    # object (InstancePCIRequests-like), not a primitive dict — confirm
    # against callers.
    pci_requests = instance.get('pci_requests')
    if pci_requests and pci_requests.requests and self.pci_stats:
        self.pci_stats.apply_requests(pci_requests.requests)
    # Calculate the numa usage
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    self.numa_topology = updated_numa_topology
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def test_never_serialize_result(self):
    """never_serialize_result forces an object even for a JSON host."""
    host_dict = {"numa_topology": self.hosttopo.to_json()}
    instance_dict = {"numa_topology": self.instancetopo}
    usage = hw.get_host_numa_usage_from_instance(
        host_dict, instance_dict, never_serialize_result=True)
    self.assertIsInstance(usage, hw.VirtNUMAHostTopology)
    self._check_usage(usage)
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts the instance's RAM, disk and vCPU demand from the host's
    free pools, applies PCI requests, recomputes host NUMA usage and
    bumps the I/O-operation counter for disk-heavy states.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    self.updated = timeutils.utcnow()
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. Thus, it has a .pci_requests field, which gets converted
    # to a primitive early on, and is thus a dict here. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        # BUGFIX: pci_requests is a dict at this point (see NOTE above),
        # so the requests must be read with item access; the previous
        # attribute access (pci_requests.requests) raised AttributeError.
        self.pci_stats.apply_requests(pci_requests['requests'])
    # Calculate the numa usage
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    self.numa_topology = updated_numa_topology
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def host_passes(self, host_state, filter_properties):
    """Return True if the host can fit the instance's NUMA topology.

    Simulates placing the instance on the host and checks each NUMA
    cell's memory and CPU usage against the configured overcommit
    ratios.  On success the computed per-cell limits are recorded in
    ``host_state.limits['numa_topology']`` for the claim step.
    """
    ram_ratio = CONF.ram_allocation_ratio
    cpu_ratio = CONF.cpu_allocation_ratio
    instance = filter_properties.get('instance_properties', {})
    instance_topology = hardware.instance_topology_from_instance(instance)
    if instance_topology:
        if host_state.numa_topology:
            limit_cells = []
            # Compute the would-be usage without mutating host_state.
            usage_after_instance = (
                hardware.get_host_numa_usage_from_instance(
                    host_state, instance, never_serialize_result=True))
            for cell in usage_after_instance.cells:
                max_cell_memory = int(cell.memory * ram_ratio)
                max_cell_cpu = len(cell.cpuset) * cpu_ratio
                # Reject the host as soon as any cell is oversubscribed.
                if (cell.memory_usage > max_cell_memory or
                        cell.cpu_usage > max_cell_cpu):
                    return False
                limit_cells.append(
                    hardware.VirtNUMATopologyCellLimit(
                        cell.id, cell.cpuset, cell.memory,
                        max_cell_cpu, max_cell_memory))
            host_state.limits['numa_topology'] = (
                hardware.VirtNUMALimitTopology(
                    cells=limit_cells).to_json())
            return True
        else:
            # Instance wants NUMA placement but host reports none.
            return False
    else:
        # No NUMA request: the filter does not constrain this host.
        return True
def _update_usage(self, usage, sign=1):
    """Add (sign=1) or remove (sign=-1) an instance's usage.

    Updates the tracked compute node's memory, disk, vCPU and NUMA
    accounting from the ``usage`` dict (instance-like resource values).
    """
    mem_usage = usage['memory_mb']
    # Include the hypervisor's per-instance memory overhead.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead['memory_mb']
    self.compute_node.memory_mb_used += sign * mem_usage
    self.compute_node.local_gb_used += sign * usage.get('root_gb', 0)
    self.compute_node.local_gb_used += sign * usage.get('ephemeral_gb', 0)
    self.compute_node.vcpus_used += sign * usage.get('vcpus', 0)
    # free ram and disk may be negative, depending on policy:
    self.compute_node.free_ram_mb = (self.compute_node.memory_mb -
                                     self.compute_node.memory_mb_used)
    self.compute_node.free_disk_gb = (self.compute_node.local_gb -
                                      self.compute_node.local_gb_used)
    self.compute_node.running_vms = self.stats.num_instances
    self.ext_resources_handler.update_from_instance(usage, sign)
    # Calculate the numa usage
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        self.compute_node, usage, free)
    self.compute_node.numa_topology = updated_numa_topology
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, applies PCI requests, recomputes
    NUMA usage, and counts disk-heavy states as I/O operations.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    self.updated = timeutils.utcnow()
    # Track number of instances on host
    self.num_instances += 1
    # NOTE(review): attribute access assumes pci_requests is an object
    # here rather than a primitive dict — confirm against callers.
    pci_requests = instance.get('pci_requests')
    if pci_requests and pci_requests.requests and self.pci_stats:
        self.pci_stats.apply_requests(pci_requests.requests)
    # Calculate the numa usage
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    self.numa_topology = updated_numa_topology
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def _update_usage(self, context, resources, usage, sign=1):
    """Add (sign=1) or remove (sign=-1) an instance's usage.

    Mutates the ``resources`` dict in place: memory, disk, bandwidth,
    derived free values, running-VM count and NUMA topology.
    """
    mem_usage = usage['memory_mb']
    # Include the hypervisor's per-instance memory overhead.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead['memory_mb']
    resources['memory_mb_used'] += sign * mem_usage
    resources['local_gb_used'] += sign * usage.get('root_gb', 0)
    resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
    # FIX: dropped the stray C-style semicolons and switched to .get()
    # so usage records without bandwidth accounting don't raise
    # KeyError — consistent with the root_gb/ephemeral_gb handling.
    bandwidth_used = usage.get('network_bandwidth', 0)
    resources['bandwidth_used'] += sign * bandwidth_used
    # free ram and disk may be negative, depending on policy:
    resources['free_ram_mb'] = (resources['memory_mb'] -
                                resources['memory_mb_used'])
    resources['free_disk_gb'] = (resources['local_gb'] -
                                 resources['local_gb_used'])
    resources['running_vms'] = self.stats.num_instances
    self.ext_resources_handler.update_from_instance(usage, sign)
    # Calculate the numa usage
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        resources, usage, free)
    resources['numa_topology'] = updated_numa_topology
def test_dicts_instance_json(self):
    """An object host topology keeps the result unserialized."""
    host_dict = {"numa_topology": self.hosttopo}
    instance_dict = {"numa_topology": self.instancetopo.to_json()}
    usage = hw.get_host_numa_usage_from_instance(host_dict, instance_dict)
    self.assertIsInstance(usage, hw.VirtNUMAHostTopology)
    self._check_usage(usage)
def _update_usage(self, usage, nodename, sign=1):
    """Add (sign=1) or remove (sign=-1) an instance's usage on a node.

    Applies memory/disk/vCPU deltas (plus hypervisor overhead) to the
    tracked ComputeNode for ``nodename`` and recomputes NUMA usage.
    """
    mem_usage = usage['memory_mb']
    disk_usage = usage.get('root_gb', 0)
    vcpus_usage = usage.get('vcpus', 0)
    # Hypervisor overhead may add memory, disk and vCPU cost.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead['memory_mb']
    disk_usage += overhead.get('disk_gb', 0)
    vcpus_usage += overhead.get('vcpus', 0)
    cn = self.compute_nodes[nodename]
    cn.memory_mb_used += sign * mem_usage
    cn.local_gb_used += sign * disk_usage
    cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
    cn.vcpus_used += sign * vcpus_usage
    # free ram and disk may be negative, depending on policy:
    cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
    cn.free_disk_gb = cn.local_gb - cn.local_gb_used
    cn.running_vms = self.stats.num_instances
    # Calculate the numa usage
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        cn, usage, free)
    cn.numa_topology = updated_numa_topology
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, applies PCI requests, recomputes
    NUMA usage, and counts disk-heavy states as I/O operations.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    self.updated = timeutils.utcnow()
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. Thus, it has a .pci_requests field, which gets converted
    # to a primitive early on, and is thus a dict here. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        # BUGFIX: pci_requests is a dict at this point (see NOTE above),
        # so the requests must be read with item access; the previous
        # attribute access (pci_requests.requests) raised AttributeError.
        self.pci_stats.apply_requests(pci_requests['requests'])
    # Calculate the numa usage
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    self.numa_topology = updated_numa_topology
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def test_dicts_host_json(self):
    """Serialized host topology => serialized usage result."""
    host_dict = {"numa_topology": self.hosttopo.to_json()}
    instance_dict = {"numa_topology": self.instancetopo}
    serialized = hw.get_host_numa_usage_from_instance(host_dict,
                                                      instance_dict)
    self.assertIsInstance(serialized, six.string_types)
    deserialized = hw.VirtNUMAHostTopology.from_json(serialized)
    self._check_usage(deserialized)
def test_object_host_instance(self):
    """ComputeNode host object => serialized usage result."""
    node = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
    usage = hw.get_host_numa_usage_from_instance(
        node, {"numa_topology": self.instancetopo})
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def test_never_serialize_result(self):
    """The never_serialize_result flag always returns an object."""
    serialized_host = {'numa_topology': self.hosttopo.to_json()}
    usage = hw.get_host_numa_usage_from_instance(
        serialized_host,
        {'numa_topology': self.instancetopo},
        never_serialize_result=True)
    # Even a serialized host must not produce a serialized result here.
    self.assertIsInstance(usage, hw.VirtNUMAHostTopology)
    self._check_usage(usage)
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, fits the instance onto the host NUMA
    topology (honouring PCI requests), and recomputes host NUMA usage.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance["root_gb"] + instance["ephemeral_gb"]) * 1024
    ram_mb = instance["memory_mb"]
    vcpus = instance["vcpus"]
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    now = timeutils.utcnow()
    # NOTE(sbauza): Objects are UTC tz-aware by default
    self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get("pci_requests")
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. The pci_requests are a dict as well. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests["requests"] and self.pci_stats:
        pci_requests = objects.InstancePCIRequests.from_request_spec_instance_props(pci_requests)
        pci_requests = pci_requests.requests
    else:
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(self)
    instance_numa_topology = hardware.instance_topology_from_instance(instance)
    instance["numa_topology"] = hardware.numa_fit_instance_to_host(
        host_numa_topology,
        instance_numa_topology,
        limits=self.limits.get("numa_topology"),
        pci_requests=pci_requests,
        pci_stats=self.pci_stats,
    )
    if pci_requests:
        instance_cells = None
        if instance["numa_topology"]:
            instance_cells = instance["numa_topology"].cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(self, instance)
    vm_state = instance.get("vm_state", vm_states.BUILDING)
    task_state = instance.get("task_state")
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
        task_states.RESIZE_MIGRATING,
        task_states.REBUILDING,
        task_states.RESIZE_PREP,
        task_states.IMAGE_SNAPSHOT,
        task_states.IMAGE_BACKUP,
        task_states.UNSHELVING,
        task_states.RESCUING,
    ]:
        self.num_io_ops += 1
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, fits the instance onto the host NUMA
    topology (honouring PCI requests), and recomputes host NUMA usage.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    now = timeutils.utcnow()
    # NOTE(sbauza): Objects are UTC tz-aware by default
    self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. The pci_requests are a dict as well. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        pci_requests = objects.InstancePCIRequests \
            .from_request_spec_instance_props(pci_requests)
        pci_requests = pci_requests.requests
    else:
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = hardware.instance_topology_from_instance(
        instance)
    instance['numa_topology'] = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if instance['numa_topology']:
            instance_cells = instance['numa_topology'].cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def test_object_instance_with_load(self):
    """An Instance without numa_topology triggers a lazy DB load."""
    node = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
    bare_instance = objects.Instance(context=self.context,
                                     uuid=str(uuid.uuid4()))
    patcher = mock.patch.object(objects.InstanceNUMATopology,
                                "get_by_instance_uuid",
                                return_value=None)
    with patcher as get_mock:
        usage = hw.get_host_numa_usage_from_instance(node, bare_instance)
        self.assertIsInstance(usage, six.string_types)
        # The topology must have been looked up from the database.
        self.assertTrue(get_mock.called)
def test_instance_serialized_by_base_obj_to_primitive(self):
    """Usage works when the instance arrives as an object primitive."""
    numa_obj = objects.InstanceNUMATopology.obj_from_topology(
        self.instancetopo)
    instance_obj = objects.Instance(context=self.context, id=1,
                                    uuid=str(uuid.uuid4()),
                                    numa_topology=numa_obj)
    node = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
    primitive = base_obj.obj_to_primitive(instance_obj)
    usage = hw.get_host_numa_usage_from_instance(node, primitive)
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def test_object_instance_with_load(self):
    """Missing numa_topology on the Instance is fetched from the DB."""
    node = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
    instance_obj = objects.Instance(context=self.context,
                                    uuid=str(uuid.uuid4()))
    with mock.patch.object(objects.InstanceNUMATopology,
                           'get_by_instance_uuid',
                           return_value=None) as mocked_get:
        usage = hw.get_host_numa_usage_from_instance(node, instance_obj)
        self.assertIsInstance(usage, six.string_types)
        self.assertTrue(mocked_get.called)
def test_attr_host(self):
    """A host exposing numa_topology as an attribute is accepted."""
    # NOTE: the inner __init__ names its first argument ``this`` so that
    # ``self`` in the body still refers to the enclosing test case.
    class Host(object):
        def __init__(this):
            this.numa_topology = self.hosttopo.to_json()

    attr_host = Host()
    instance_dict = {'numa_topology': self.instancetopo.to_json()}
    usage = hw.get_host_numa_usage_from_instance(attr_host, instance_dict)
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def test_attr_host(self):
    """numa_topology may be an attribute rather than a dict key."""
    # The inner __init__ deliberately avoids naming its first argument
    # ``self`` so the enclosing test case stays reachable as ``self``.
    class Host(object):
        def __init__(host_obj):
            host_obj.numa_topology = self.hosttopo.to_json()

    usage = hw.get_host_numa_usage_from_instance(
        Host(), {"numa_topology": self.instancetopo.to_json()})
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def consume_from_request(self, spec_obj):
    """Incrementally update host state from an RequestSpec object.

    Deducts RAM/disk/vCPU demand, fits the requested NUMA topology onto
    the host (honouring PCI requests), recomputes NUMA usage, and
    counts the placement as one I/O operation.
    """
    # Request reports disk in GB; host free pool is tracked in MB.
    disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
    ram_mb = spec_obj.memory_mb
    vcpus = spec_obj.vcpus
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = spec_obj.pci_requests
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        # No stats to claim against: treat as no PCI demand.
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = spec_obj.numa_topology
    # Store the fitted (pinned) topology back on the spec.
    spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if spec_obj.numa_topology:
            instance_cells = spec_obj.numa_topology.cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
    # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
    # is an InstanceNUMATopology object. Unfortunately, since
    # HostState.host_numa_topology is still limbo between an NUMATopology
    # object (when updated by consume_from_request), a ComputeNode object
    # (when updated by update_from_compute_node), we need to keep the call
    # to get_host_numa_usage_from_instance until it's fixed (and use a
    # temporary orphaned Instance object as a proxy)
    instance = objects.Instance(numa_topology=spec_obj.numa_topology)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_request() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, converts the primitive PCI requests
    back into objects, fits the instance onto the host NUMA topology,
    and recomputes host NUMA usage.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. The pci_requests are a dict as well. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        pci_requests = objects.InstancePCIRequests \
            .from_request_spec_instance_props(pci_requests)
        pci_requests = pci_requests.requests
    else:
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = hardware.instance_topology_from_instance(
        instance)
    instance['numa_topology'] = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if instance['numa_topology']:
            instance_cells = instance['numa_topology'].cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, applies PCI requests against the
    instance's NUMA cells, fits the instance onto the host NUMA
    topology, and recomputes host NUMA usage.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    now = timeutils.utcnow()
    # NOTE(sbauza): Objects are UTC tz-aware by default
    self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
    # Track number of instances on host
    self.num_instances += 1
    instance_numa_topology = hardware.instance_topology_from_instance(
        instance)
    instance_cells = None
    if instance_numa_topology:
        instance_cells = instance_numa_topology.cells
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. Thus, it has a .pci_requests field, which gets converted
    # to a primitive early on, and is thus a dict here. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        # BUGFIX: pci_requests is a dict at this point (see NOTE above),
        # so the requests must be read with item access; the previous
        # attribute access (pci_requests.requests) raised AttributeError.
        pci_requests = pci_requests['requests']
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    # NOTE(review): when the branch above is not taken, pci_requests may
    # still be the raw primitive dict when passed below; sibling
    # implementations reset it to None in that case — TODO confirm.
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance['numa_topology'] = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    vm_state = instance.get('vm_state', vm_states.BUILDING)
    task_state = instance.get('task_state')
    # Building / migrating / snapshotting states imply host disk I/O.
    if vm_state == vm_states.BUILDING or task_state in [
            task_states.RESIZE_MIGRATING, task_states.REBUILDING,
            task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
            task_states.IMAGE_BACKUP, task_states.UNSHELVING,
            task_states.RESCUING]:
        self.num_io_ops += 1
def test_instance_serialized_by_build_request_spec(self):
    """Usage works on a jsonutils-primitivized instance payload."""
    node = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
    instance_obj = objects.Instance(
        context=self.context, id=1, uuid=str(uuid.uuid4()),
        numa_topology=objects.InstanceNUMATopology.obj_from_topology(
            self.instancetopo))
    # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
    # We can remove this test once we no longer use that method.
    payload = jsonutils.to_primitive(
        base_obj.obj_to_primitive(instance_obj))
    usage = hw.get_host_numa_usage_from_instance(node, payload)
    self.assertIsInstance(usage, six.string_types)
    self._check_usage(hw.VirtNUMAHostTopology.from_json(usage))
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, converts the primitive PCI requests
    back into objects, fits the instance onto the host NUMA topology,
    and recomputes host NUMA usage.  Always counts one I/O operation.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    # NOTE(danms): Instance here is still a dict, which is converted from
    # an object. The pci_requests are a dict as well. Convert this when
    # we get an object all the way to this path.
    if pci_requests and pci_requests['requests'] and self.pci_stats:
        pci_requests = objects.InstancePCIRequests \
            .from_request_spec_instance_props(pci_requests)
        pci_requests = pci_requests.requests
    else:
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = hardware.instance_topology_from_instance(
        instance)
    instance['numa_topology'] = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if instance['numa_topology']:
            instance_cells = instance['numa_topology'].cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_instance() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
def _update_usage(self, context, resources, usage, sign=1):
    """Override of the parent method for the boot-from-volume case.

    Skips local disk accounting for boot-from-volume instances, then
    recomputes NUMA usage while preserving the sibling information
    already present in ``resources['numa_topology']``.
    """
    mem_usage = usage['memory_mb']
    # Include the hypervisor's per-instance memory overhead.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead['memory_mb']
    resources['memory_mb_used'] += sign * mem_usage
    # If the vm is booted from a volume we shouldn't count disk usage.
    if not h_utils.is_boot_from_volume(context, usage):
        resources['local_gb_used'] += sign * usage.get('root_gb', 0)
        resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
    # free ram and disk may be negative, depending on policy:
    resources['free_ram_mb'] = (resources['memory_mb'] -
                                resources['memory_mb_used'])
    resources['free_disk_gb'] = (resources['local_gb'] -
                                 resources['local_gb_used'])
    resources['running_vms'] = self.stats.num_instances
    self.ext_resources_handler.update_from_instance(usage, sign)
    # Calculate the numa usage
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        resources, usage, free)
    if updated_numa_topology:
        updated_numa_topology = jsonutils.loads(updated_numa_topology)
        # Copy the sibling lists from the current resources topology into
        # the freshly computed one so they are not lost on update.
        resource_numa_topology = jsonutils.loads(
            resources['numa_topology'])
        updated_cells = updated_numa_topology.get(
            'nova_object.data', {}).get('cells', [])
        res_cells = resource_numa_topology.get(
            'nova_object.data', {}).get('cells', [])
        # NOTE: we assume both cell lists are in the same (constant) order.
        for res_cell, updated_cell in zip(res_cells, updated_cells):
            res_cell_date = res_cell.get('nova_object.data')
            updated_cell_date = updated_cell.get('nova_object.data')
            if res_cell_date['id'] == updated_cell_date['id']:
                updated_cell_date['siblings'] = res_cell_date['siblings']
        updated_numa_topology = jsonutils.dumps(updated_numa_topology)
    resources['numa_topology'] = updated_numa_topology
def consume_from_instance(self, instance):
    """Incrementally update host state from an instance.

    Deducts RAM/disk/vCPU demand, fits the instance onto the host NUMA
    topology (honouring PCI requests), recomputes host NUMA usage and
    always counts one I/O operation for the move.
    """
    # Instance reports disk in GB; host free pool is tracked in MB.
    disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
    ram_mb = instance['memory_mb']
    vcpus = instance['vcpus']
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = instance.get('pci_requests')
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        # No stats to claim against: treat as no PCI demand.
        pci_requests = None
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = hardware.instance_topology_from_instance(
        instance)
    instance['numa_topology'] = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if instance['numa_topology']:
            instance_cells = instance['numa_topology'].cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)
    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_instance() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
def _update_usage(self, context, resources, usage, sign=1):
    """Add (sign=1) or remove (sign=-1) an instance's usage.

    Mutates the ``resources`` dict in place: memory, disk, derived
    free values, running-VM count and NUMA topology.
    """
    mem_usage = usage["memory_mb"]
    # Include the hypervisor's per-instance memory overhead.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead["memory_mb"]
    resources["memory_mb_used"] += sign * mem_usage
    resources["local_gb_used"] += sign * usage.get("root_gb", 0)
    resources["local_gb_used"] += sign * usage.get("ephemeral_gb", 0)
    # free ram and disk may be negative, depending on policy:
    resources["free_ram_mb"] = resources["memory_mb"] - resources["memory_mb_used"]
    resources["free_disk_gb"] = resources["local_gb"] - resources["local_gb_used"]
    resources["running_vms"] = self.stats.num_instances
    self.ext_resources_handler.update_from_instance(usage, sign)
    # Calculate the numa usage
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(resources, usage, free)
    resources["numa_topology"] = updated_numa_topology
def _update_usage(self, context, resources, usage, sign=1):
    """Apply (sign=1) or revert (sign=-1) an instance's resource usage."""
    # Memory demand includes the hypervisor's per-instance overhead.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage = usage['memory_mb'] + overhead['memory_mb']
    resources['memory_mb_used'] += sign * mem_usage
    disk_usage = usage.get('root_gb', 0) + usage.get('ephemeral_gb', 0)
    resources['local_gb_used'] += sign * disk_usage
    # free ram and disk may be negative, depending on policy:
    resources['free_ram_mb'] = (resources['memory_mb'] -
                                resources['memory_mb_used'])
    resources['free_disk_gb'] = (resources['local_gb'] -
                                 resources['local_gb_used'])
    resources['running_vms'] = self.stats.num_instances
    self.ext_resources_handler.update_from_instance(usage, sign)
    # Recompute NUMA usage; a negative sign means we are freeing.
    freeing = (sign == -1)
    resources['numa_topology'] = (
        hardware.get_host_numa_usage_from_instance(resources, usage,
                                                   freeing))
def _locked_consume_from_request(self, spec_obj):
    """Update host state from a RequestSpec (WRS variant, under lock).

    Fits the requested NUMA topology with strict/prefer PCI affinity,
    applies PCI requests, recomputes NUMA usage, performs normalized
    vCPU accounting (or consumes the whole node for ironic), and
    tracks I/O operations and L3 cache closids.
    """
    # Request reports disk in GB; host free pool is tracked in MB.
    disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
    ram_mb = spec_obj.memory_mb
    vcpus = spec_obj.vcpus
    # WRS - extra_specs are needed in multiple places below
    extra_specs = spec_obj.flavor.extra_specs
    image_props = spec_obj.image.properties
    # Track number of instances on host
    self.num_instances += 1
    pci_requests = spec_obj.pci_requests
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        pci_requests = None
    # WRS: Support strict vs prefer allocation of PCI devices.
    pci_numa_affinity = extra_specs.get('hw:wrs:pci_numa_affinity',
                                        'strict')
    pci_strict = False if pci_numa_affinity == 'prefer' else True
    # Calculate the numa usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = spec_obj.numa_topology
    spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats,
        pci_strict=pci_strict)
    if pci_requests:
        instance_cells = None
        if spec_obj.numa_topology:
            instance_cells = spec_obj.numa_topology.cells
        self.pci_stats.apply_requests(pci_requests, instance_cells,
                                      pci_strict=pci_strict)
    # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
    # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
    # is an InstanceNUMATopology object. Unfortunately, since
    # HostState.host_numa_topology is still limbo between an NUMATopology
    # object (when updated by consume_from_request), a ComputeNode object
    # (when updated by update_from_compute_node), we need to keep the call
    # to get_host_numa_usage_from_instance until it's fixed (and use a
    # temporary orphaned Instance object as a proxy)
    instance = objects.Instance(numa_topology=spec_obj.numa_topology)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance, strict=True)
    if self.is_ironic:
        # Consume node's entire resources regardless of instance request
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_used = self.vcpus_total
    else:
        # Get set of reserved thread sibling pcpus that cannot be allocated
        # when using 'isolate' cpu_thread_policy.
        reserved = hardware.get_reserved_thread_sibling_pcpus(
            instance_numa_topology, host_numa_topology)
        threads_per_core = hardware._get_threads_per_core(
            host_numa_topology)
        # WRS - normalized vCPU accounting
        vcpus = hardware.normalized_vcpus(
            vcpus=vcpus, reserved=reserved, extra_specs=extra_specs,
            image_props=image_props, ratio=self.cpu_allocation_ratio,
            threads_per_core=threads_per_core)
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus
    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_request() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
    # L3 CAT Support
    if ((instance.numa_topology is not None) and
            any(cell.cachetune_requested
                for cell in instance.numa_topology.cells)):
        self.l3_closids_used += 1
def _update_usage(instance, compute_node, free):
    """Recompute and store the compute node's NUMA usage.

    ``free`` indicates whether the instance's usage is being released
    rather than consumed.
    """
    compute_node.numa_topology = (
        hardware.get_host_numa_usage_from_instance(compute_node,
                                                   instance, free))