def setUp(self, mock_init_agg, mock_init_inst):
    """Configure fake scheduler filters and a set of fake hosts."""
    super(HostManagerTestCase, self).setUp()
    # Advertise both fake filter classes; only the first is the default.
    fake_filters = ['%s.%s' % (__name__, name)
                    for name in ['FakeFilterClass1', 'FakeFilterClass2']]
    self.flags(scheduler_available_filters=fake_filters)
    self.flags(scheduler_default_filters=['FakeFilterClass1'])
    self.host_manager = host_manager.HostManager()
    # Four single-node hosts (fake_host1..4) ...
    self.fake_hosts = [
        host_manager.HostState('fake_host%s' % idx, 'fake-node')
        for idx in range(1, 5)
    ]
    # ... plus one host exposing four nodes (fake-node1..4).
    self.fake_hosts.extend(
        host_manager.HostState('fake_multihost', 'fake-node%s' % idx)
        for idx in range(1, 5)
    )
def setUp(self):
    """Create a HostManager plus fake HostState objects for the tests."""
    super(HostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    # Use range() rather than the Python-2-only xrange(): the sequences
    # are tiny, other blocks in this file already use range(), and this
    # keeps the module importable under Python 3.
    self.fake_hosts = [
        host_manager.HostState('fake_host%s' % x, 'fake-node')
        for x in range(1, 5)
    ]
    self.fake_hosts += [
        host_manager.HostState('fake_multihost', 'fake-node%s' % x)
        for x in range(1, 5)
    ]
    # Undo any time override a test may have installed.
    self.addCleanup(timeutils.clear_time_override)
def setUp(self, mock_init_agg, mock_init_inst):
    """Build an opie HostManager plus fake nova HostState objects."""
    super(OpieHostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    # fake_host1..fake_host4, each a single-node host.
    self.fake_hosts = [
        nova_host_manager.HostState('fake_host%s' % idx, 'fake-node')
        for idx in range(1, 5)
    ]
    # One multi-node host contributing fake-node1..fake-node4.
    self.fake_hosts.extend(
        nova_host_manager.HostState('fake_multihost', 'fake-node%s' % idx)
        for idx in range(1, 5)
    )
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
def new_host_state(self, host, topic, capabilities=None, service=None,
                   nodename=None):
    """Return a BaremetalNodeState or HostState based on capabilities.

    If the capabilities for ``topic`` advertise 'baremetal_driver', a
    BaremetalNodeState is created; otherwise a plain HostState.
    """
    if capabilities is None:
        capabilities = {}
    topic_caps = capabilities.get(topic, {})
    if topic_caps.get('baremetal_driver'):
        return BaremetalNodeState(host, topic, capabilities, service,
                                  nodename=nodename)
    return host_manager.HostState(host, topic, capabilities, service,
                                  nodename=nodename)
def test_stat_consumption_from_compute_node_non_pci(self):
    """A compute node carrying no PCI data leaves host.pci_stats unset."""
    # Stats arrive as a list of {key, value} records.
    stat_pairs = [
        ('num_instances', '5'),
        ('num_proj_12345', '3'),
        ('num_proj_23456', '1'),
        ('num_vm_%s' % vm_states.BUILDING, '2'),
        ('num_vm_%s' % vm_states.SUSPENDED, '1'),
        ('num_task_%s' % task_states.RESIZE_MIGRATING, '1'),
        ('num_task_%s' % task_states.MIGRATING, '2'),
        ('num_os_type_linux', '4'),
        ('num_os_type_windoze', '1'),
        ('io_workload', '42'),
    ]
    stats = [{'key': k, 'value': v} for k, v in stat_pairs]
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
                   local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
                   updated_at=None, host_ip='127.0.0.1',
                   hypervisor_version=hyper_ver_int)

    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)

    self.assertIsNone(host.pci_stats)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def setUp(self):
    """Create a HostManager and four fake single-node hosts."""
    super(HostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    # Use range() rather than the Python-2-only xrange(); other blocks
    # in this file already use range() and it works on Python 3 too.
    self.fake_hosts = [
        host_manager.HostState('fake_host%s' % x, 'fake-node')
        for x in range(1, 5)
    ]
def test_state_public_api_signatures(self):
    """IronicNodeState must keep the same public API as HostState."""
    base_state = host_manager.HostState("dummy", "dummy")
    ironic_state = ironic_host_manager.IronicNodeState("dummy", "dummy")
    self.assertPublicAPISignatures(base_state, ironic_state)
def host_state_cls(self, host, node, **kwargs):
    """Factory for HostState objects.

    Returns an IronicNodeState when the compute record carries the
    'baremetal cpu' marker in cpu_info, otherwise a plain HostState.
    """
    compute = kwargs.get('compute')
    if compute and compute.get('cpu_info') == 'baremetal cpu':
        return IronicNodeState(host, node, **kwargs)
    return host_manager.HostState(host, node, **kwargs)
def test_stat_consumption_from_instance(self, numa_usage_mock):
    """Each consumed instance updates NUMA usage and host counters."""

    def _fake_instance(vm_state, task_state):
        # Minimal instance dict; numa_topology gets stamped by consume.
        return dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
                    project_id='12345', vm_state=vm_state,
                    task_state=task_state, os_type='Linux',
                    uuid='fake-uuid', numa_topology=None)

    numa_usage_mock.return_value = 'fake-consumed-once'
    host = host_manager.HostState("fakehost", "fakenode")
    host.instance_numa_topology = 'fake-instance-topology'

    first = _fake_instance(vm_states.BUILDING, task_states.SCHEDULING)
    host.consume_from_instance(first)
    numa_usage_mock.assert_called_once_with(host, first)
    self.assertEqual('fake-consumed-once', host.numa_topology)
    # The stashed instance topology is written back onto the instance.
    self.assertEqual('fake-instance-topology', first['numa_topology'])

    numa_usage_mock.return_value = 'fake-consumed-twice'
    second = _fake_instance(vm_states.PAUSED, None)
    host.consume_from_instance(second)
    self.assertEqual('fake-instance-topology', second['numa_topology'])

    self.assertEqual(2, host.num_instances)
    # Only the BUILDING/SCHEDULING instance counts as an I/O operation.
    self.assertEqual(1, host.num_io_ops)
    self.assertEqual(2, numa_usage_mock.call_count)
    self.assertEqual(((host, second),), numa_usage_mock.call_args)
    self.assertEqual('fake-consumed-twice', host.numa_topology)
def test_stat_consumption_from_instance_with_pci_exception(self):
    """A failed PCI request must leave host.updated untouched."""
    pci_requests_obj = objects.InstancePCIRequests(
        requests=[objects.InstancePCIRequest(
            request_id='fake_request1', count=3,
            spec=[{'vendor_id': '8086'}])],
        instance_uuid='fake-uuid')
    instance = objects.Instance(
        root_gb=0, ephemeral_gb=0, memory_mb=512, vcpus=1,
        project_id='12345', vm_state=vm_states.BUILDING,
        task_state=task_states.SCHEDULING, os_type='Linux',
        uuid='fake-uuid', pci_requests=pci_requests_obj, id=1243)
    req_spec = sched_utils.build_request_spec(
        None, None, [instance],
        objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=1024,
                       vcpus=1))

    host = host_manager.HostState("fakehost", "fakenode")
    self.assertIsNone(host.updated)
    sentinel_updated = mock.sentinel.fake_updated
    host.updated = sentinel_updated
    host.pci_stats = pci_stats.PciDeviceStats()

    with mock.patch.object(host.pci_stats, 'apply_requests',
                           side_effect=exception.PciDeviceRequestFailed):
        host.consume_from_instance(req_spec['instance_properties'])

    # Consumption failed, so the timestamp must not have been bumped.
    self.assertEqual(sentinel_updated, host.updated)
def test_stat_consumption_from_compute_node_rescue_unshelving(self):
    """Unshelving/rescuing task stats feed the io_workload counters."""
    node_stats = {
        'num_instances': '5',
        'num_proj_12345': '3',
        'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.UNSHELVING: '1',
        'num_task_%s' % task_states.RESCUING: '2',
        'num_os_type_linux': '4',
        'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = objects.ComputeNode(
        stats=node_stats, memory_mb=0, free_disk_gb=0, local_gb=0,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None,
        host_ip='127.0.0.1', hypervisor_type='htype',
        hypervisor_hostname='hostname', cpu_info='cpu_info',
        supported_hv_specs=[], hypervisor_version=hyper_ver_int,
        numa_topology=None, pci_device_pools=None, metrics=None)

    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)

    self.assertEqual(5, host.num_instances)
    self.assertEqual(42, host.num_io_ops)
    self.assertEqual(10, len(host.stats))
    self.assertIsNone(host.pci_stats)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_state_public_api_signatures(self):
    """BaremetalNodeState must keep the same public API as HostState."""
    base_state = host_manager.HostState("dummy", "dummy")
    bm_state = baremetal_host_manager.BaremetalNodeState("dummy", "dummy")
    self.assertPublicAPISignatures(base_state, bm_state)
def test_handles_deleted_instance(self):
    """Provisioning must tolerate the instance vanishing mid-schedule."""

    def _raise_instance_not_found(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id='123')

    # Make the DB update step behave as if the instance was deleted.
    self.stubs.Set(driver, 'instance_update_db',
                   _raise_instance_not_found)

    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    weighted_host = weights.WeighedHost(
        host_manager.HostState('host2', 'node2'), 1.42)
    instance_uuid = 'fake-uuid1'
    request_spec = {
        'instance_type': {'memory_mb': 1, 'local_gb': 1},
        'instance_properties': {'project_id': 1, 'os_type': 'Linux'},
        'instance_uuids': [instance_uuid],
    }
    # Must not raise even though instance_update_db explodes.
    sched._provision_resource(ctxt, weighted_host, request_spec,
                              {}, None, None, None, None)
def test_stat_consumption_from_compute_node_rescue_unshelving(self):
    """JSON-encoded unshelving/rescuing stats feed io_workload."""
    raw_stats = {
        'num_instances': '5',
        'num_proj_12345': '3',
        'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.UNSHELVING: '1',
        'num_task_%s' % task_states.RESCUING: '2',
        'num_os_type_linux': '4',
        'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    # Stats are stored JSON-encoded on the compute-node record.
    compute = dict(stats=jsonutils.dumps(raw_stats),
                   memory_mb=0, free_disk_gb=0, local_gb=0,
                   local_gb_used=0, free_ram_mb=0, vcpus=0,
                   vcpus_used=0, updated_at=None, host_ip='127.0.0.1',
                   hypervisor_version=hyper_ver_int, numa_topology=None)

    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)

    self.assertEqual(5, host.num_instances)
    self.assertEqual(42, host.num_io_ops)
    self.assertEqual(10, len(host.stats))
    self.assertIsNone(host.pci_stats)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_stat_consumption_from_compute_node(self):
    """Key/value stat records populate all the per-host counters."""
    stat_pairs = [
        ('num_instances', '5'),
        ('num_proj_12345', '3'),
        ('num_proj_23456', '1'),
        ('num_vm_%s' % vm_states.BUILDING, '2'),
        ('num_vm_%s' % vm_states.SUSPENDED, '1'),
        ('num_task_%s' % task_states.RESIZE_MIGRATING, '1'),
        ('num_task_%s' % task_states.MIGRATING, '2'),
        ('num_os_type_linux', '4'),
        ('num_os_type_windoze', '1'),
        ('io_workload', '42'),
    ]
    stats = [{'key': k, 'value': v} for k, v in stat_pairs]
    compute = dict(stats=stats, memory_mb=0, free_disk_gb=0,
                   local_gb=0, local_gb_used=0, free_ram_mb=0,
                   vcpus=0, vcpus_used=0)

    host = host_manager.HostState("fakehost", "faketopic")
    host.update_from_compute_node(compute)

    self.assertEqual(5, host.num_instances)
    self.assertEqual(3, host.num_instances_by_project['12345'])
    self.assertEqual(1, host.num_instances_by_project['23456'])
    self.assertEqual(2, host.vm_states[vm_states.BUILDING])
    self.assertEqual(1, host.vm_states[vm_states.SUSPENDED])
    self.assertEqual(1, host.task_states[task_states.RESIZE_MIGRATING])
    self.assertEqual(2, host.task_states[task_states.MIGRATING])
    self.assertEqual(4, host.num_instances_by_os_type['linux'])
    self.assertEqual(1, host.num_instances_by_os_type['windoze'])
    self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance_with_pci_exception(self):
    """A failed PCI request must leave host.updated untouched."""
    pci_requests_obj = objects.InstancePCIRequests(
        requests=[objects.InstancePCIRequest(
            request_id='fake_request1', count=3,
            spec=[{'vendor_id': '8086'}])],
        instance_uuid='fake-uuid')
    spec_obj = objects.RequestSpec(
        instance_uuid='fake-uuid', project_id='12345',
        numa_topology=None, pci_requests=pci_requests_obj,
        flavor=objects.Flavor(root_gb=0, ephemeral_gb=0,
                              memory_mb=1024, vcpus=1))

    host = host_manager.HostState("fakehost", "fakenode")
    self.assertIsNone(host.updated)
    sentinel_updated = mock.sentinel.fake_updated
    host.updated = sentinel_updated
    host.pci_stats = pci_stats.PciDeviceStats()

    with mock.patch.object(host.pci_stats, 'apply_requests',
                           side_effect=exception.PciDeviceRequestFailed):
        host.consume_from_request(spec_obj)

    # Consumption failed, so the timestamp must not have been bumped.
    self.assertEqual(sentinel_updated, host.updated)
def test_host_state_obj_to_dict_numa_topology_limits_conversion(self):
    """_host_state_obj_to_dict primitivizes only the NUMA limits.

    A NUMATopologyLimits object stored under limits['numa_topology']
    (as the NUMATopologyFilter does) must come back as a primitive,
    while any other object in limits is passed through untouched.
    """
    host_state = host_manager.HostState('fake-host', 'fake-node',
                                        uuids.cell_uuid)
    numa_limits = objects.NUMATopologyLimits(
        cpu_allocation_ratio=CONF.cpu_allocation_ratio,
        ram_allocation_ratio=CONF.ram_allocation_ratio)
    host_state.limits['numa_topology'] = numa_limits
    # An unrelated object that must NOT be converted.
    ignored_limits = objects.SchedulerLimits()
    host_state.limits['ignored'] = ignored_limits

    result = manager._host_state_obj_to_dict(host_state)

    expected = {
        'host': 'fake-host',
        'nodename': 'fake-node',
        'limits': {
            'numa_topology': numa_limits.obj_to_primitive(),
            'ignored': ignored_limits,
        },
    }
    self.assertDictEqual(expected, result)
    # The conversion must not mutate the HostState's own limits.
    self.assertIsInstance(host_state.limits['numa_topology'],
                          objects.NUMATopologyLimits)
def test_update_from_compute_node_resets_stashed_numa(self):
    """update_from_compute_node drops any stashed instance NUMA data."""
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    node = objects.ComputeNode(
        memory_mb=0, free_disk_gb=0, local_gb=0, metrics=None,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None,
        host_ip='127.0.0.1', hypervisor_type='htype',
        hypervisor_hostname='hostname', cpu_info='cpu_info',
        supported_hv_specs=[], hypervisor_version=hyper_ver_int,
        numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
        stats=None, pci_device_pools=None)

    host = host_manager.HostState("fakehost", "fakenode")
    host.instance_numa_topology = 'fake-instance-topology'
    host.update_from_compute_node(node)
    self.assertIsNone(host.instance_numa_topology)
def test_resources_consumption_from_compute_node(self):
    """JSON metrics on the compute node become host.metrics entries."""
    raw_metrics = [
        dict(name='res1', value=1.0, source='source1', timestamp=None),
        dict(name='res2', value="string2", source='source2',
             timestamp=None),
    ]
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = dict(metrics=jsonutils.dumps(raw_metrics),
                   memory_mb=0, free_disk_gb=0, local_gb=0,
                   local_gb_used=0, free_ram_mb=0, vcpus=0,
                   vcpus_used=0, updated_at=None, host_ip='127.0.0.1',
                   hypervisor_version=hyper_ver_int,
                   numa_topology=fakes.NUMA_TOPOLOGY.to_json())

    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)

    self.assertEqual(2, len(host.metrics))
    self.assertEqual({'res1', 'res2'}, set(host.metrics.keys()))
    self.assertEqual(1.0, host.metrics['res1'].value)
    self.assertEqual('source1', host.metrics['res1'].source)
    self.assertEqual('string2', host.metrics['res2'].value)
    self.assertEqual('source2', host.metrics['res2'].source)
    # The NUMA topology is kept in its serialized (string) form.
    self.assertIsInstance(host.numa_topology, six.string_types)
def host_state_cls(self, host, node, **kwargs):
    """Factory for HostState objects.

    Returns an IronicNodeState for ironic-backed compute records,
    otherwise a plain nova HostState.
    """
    compute = kwargs.get('compute')
    if compute and self._is_ironic_compute(compute):
        return IronicNodeState(host, node)
    return host_manager.HostState(host, node)
def host_state_cls(self, host, node, **kwargs):
    """Factory for HostState objects.

    Returns an IronicNodeState when the compute record reports the
    'ironic' hypervisor type, otherwise a plain nova HostState.
    """
    compute = kwargs.get('compute')
    if compute and compute.get('hypervisor_type') == 'ironic':
        return IronicNodeState(host, node, **kwargs)
    return host_manager.HostState(host, node, **kwargs)
def test_stat_consumption_from_instance(self):
    """Consuming two instances updates all per-host counters."""
    host = host_manager.HostState("fakehost", "faketopic")

    def _instance(vm_state, task_state):
        return dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
                    project_id='12345', vm_state=vm_state,
                    task_state=task_state, os_type='Linux')

    host.consume_from_instance(
        _instance(vm_states.BUILDING, task_states.SCHEDULING))
    host.consume_from_instance(_instance(vm_states.PAUSED, None))

    self.assertEqual(2, host.num_instances)
    self.assertEqual(2, host.num_instances_by_project['12345'])
    self.assertEqual(1, host.vm_states[vm_states.BUILDING])
    self.assertEqual(1, host.vm_states[vm_states.PAUSED])
    self.assertEqual(1, host.task_states[task_states.SCHEDULING])
    self.assertEqual(1, host.task_states[None])
    self.assertEqual(2, host.num_instances_by_os_type['Linux'])
    # Only the BUILDING/SCHEDULING instance counts as an I/O op.
    self.assertEqual(1, host.num_io_ops)
def host_state_cls(self, host, node, **kwargs):
    """Factory for HostState objects.

    Returns an IronicNodeState when the compute record reports the
    ironic hypervisor type, otherwise a plain nova HostState.
    """
    compute = kwargs.get('compute')

    # PEP 8 (E731): use a def instead of a lambda bound to a name.
    def _hypervisor_type(c):
        # 'in' presumably tests whether the field is set on the record
        # (TODO confirm against the compute object's __contains__);
        # unset records fall through to the generic HostState.
        return c.hypervisor_type if 'hypervisor_type' in c else None

    if compute and _hypervisor_type(compute) == hv_type.IRONIC:
        return IronicNodeState(host, node)
    return host_manager.HostState(host, node)
def new_host_state(self, host, node, compute=None):
    """Return a BaremetalNodeState or HostState for the given compute.

    A compute whose 'cpu_info' equals 'baremetal cpu' yields a
    BaremetalNodeState; anything else yields a plain HostState.
    """
    if compute and compute.get('cpu_info') == 'baremetal cpu':
        return BaremetalNodeState(host, node, compute=compute)
    return host_manager.HostState(host, node, compute=compute)
def _generate_fake_hosts(num):
    """Build ``num`` HostState objects named host0..host{num-1}."""
    fake_hosts = []
    for idx in range(num):
        state = host_manager.HostState("host%s" % idx, "fake_node",
                                       uuids.cell)
        # Each host gets a matching stable uuid from the uuids sentinel.
        state.uuid = getattr(uuids, "host%s" % idx)
        state.limits = {}
        fake_hosts.append(state)
    return fake_hosts
def _get_fake_host_state(self, index=0):
    """Return a HostState with ample RAM and an enabled, fresh service."""
    state = host_manager.HostState('host_%s' % index, 'node_%s' % index)
    state.free_ram_mb = 50000
    state.service = {
        "disabled": False,
        "updated_at": timeutils.utcnow(),
        "created_at": timeutils.utcnow(),
    }
    return state
def test_from_host_state_az_via_config(self):
    """The AZ falls back to config when no aggregate provides one.

    With no aggregates carrying availability_zone metadata, the
    Selection must report CONF.default_availability_zone.
    """
    state = host_manager.HostState('host', 'node', uuids.cell_uuid)
    state.uuid = uuids.compute_node_uuid
    state.limits = {}
    state.aggregates = []

    selection = objects.Selection.from_host_state(state)

    self.assertEqual(CONF.default_availability_zone,
                     selection.availability_zone)
def test_get_cost_functions(self):
    """The scheduler exposes one cost function weighted at -1.0 that
    reports free RAM minus the reserved host memory.
    """
    self.flags(reserved_host_memory_mb=128)
    fixture = fakes.FakeFilterScheduler()
    fns = fixture.get_cost_functions()
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual with the (expected, actual) order the rest of
    # this file follows.
    self.assertEqual(1, len(fns))
    weight, fn = fns[0]
    self.assertEqual(-1.0, weight)
    hostinfo = host_manager.HostState('host', 'compute')
    hostinfo.update_from_compute_node(
        dict(memory_mb=1000, local_gb=0, vcpus=1))
    # 1000 MB total minus the 128 MB reserved above.
    self.assertEqual(1000 - 128, fn(hostinfo, {}))
def test_stat_consumption_from_compute_node(self):
    """JSON-encoded stats populate every per-host counter and field."""
    raw_stats = {
        'num_instances': '5',
        'num_proj_12345': '3',
        'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
        'num_task_%s' % task_states.MIGRATING: '2',
        'num_os_type_linux': '4',
        'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = dict(stats=jsonutils.dumps(raw_stats),
                   memory_mb=1, free_disk_gb=0, local_gb=0,
                   local_gb_used=0, free_ram_mb=0, vcpus=0,
                   vcpus_used=0, updated_at=None, host_ip='127.0.0.1',
                   hypervisor_type='htype',
                   hypervisor_hostname='hostname', cpu_info='cpu_info',
                   supported_instances='{}',
                   hypervisor_version=hyper_ver_int)

    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)

    self.assertEqual(5, host.num_instances)
    self.assertEqual(3, host.num_instances_by_project['12345'])
    self.assertEqual(1, host.num_instances_by_project['23456'])
    self.assertEqual(2, host.vm_states[vm_states.BUILDING])
    self.assertEqual(1, host.vm_states[vm_states.SUSPENDED])
    self.assertEqual(1, host.task_states[task_states.RESIZE_MIGRATING])
    self.assertEqual(2, host.task_states[task_states.MIGRATING])
    self.assertEqual(4, host.num_instances_by_os_type['linux'])
    self.assertEqual(1, host.num_instances_by_os_type['windoze'])
    self.assertEqual(42, host.num_io_ops)
    self.assertEqual(10, len(host.stats))
    self.assertEqual('127.0.0.1', host.host_ip)
    self.assertEqual('htype', host.hypervisor_type)
    self.assertEqual('hostname', host.hypervisor_hostname)
    self.assertEqual('cpu_info', host.cpu_info)
    # supported_instances arrives JSON-encoded and is deserialized.
    self.assertEqual({}, host.supported_instances)
    self.assertEqual(hyper_ver_int, host.hypervisor_version)
def new_host_state(self, host, node, capabilities=None, service=None):
    """Return an IronicNodeState or HostState based on capabilities.

    If the 'compute' capabilities advertise 'ironic_driver', an
    IronicNodeState is built; otherwise the generic HostState.
    """
    if capabilities is None:
        capabilities = {}
    compute_caps = capabilities.get('compute', {})
    if compute_caps.get('ironic_driver'):
        return IronicNodeState(host, node, capabilities, service)
    return host_manager.HostState(host, node, capabilities, service)