 def test_image_properties_filter_fails_different_hyper_version(self):
     img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
         hw_architecture=arch.X86_64,
         img_hv_type=hv_type.KVM,
         hw_vm_mode=vm_mode.HVM,
         img_hv_requested_version='>=6.2'))
     hypervisor_version = versionutils.convert_version_to_int('6.0.0')
     spec_obj = objects.RequestSpec(image=img_props)
     capabilities = {
         'enabled': True,
         'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)],
         'hypervisor_version': hypervisor_version
     }
     host = fakes.FakeHostState('host1', 'node1', capabilities)
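     # The host reports hypervisor version 6.0.0, which cannot satisfy the
     # image's img_hv_requested_version '>=6.2', so the filter must reject it.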
     self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
Example #2
 def test_image_properties_filter_baremetal_vmmode_compat(self):
     # if an old image has 'baremetal' for vmmode it should be
     # treated as hvm
     img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
         hw_vm_mode='baremetal'))
     hypervisor_version = versionutils.convert_version_to_int('6.0.0')
     spec_obj = objects.RequestSpec(image=img_props)
     capabilities = {
         'supported_instances':
             [(obj_fields.Architecture.I686, hv_type.BAREMETAL, vm_mode.HVM)],
         'hypervisor_version': hypervisor_version,
     }
     host = fakes.FakeHostState('host1', 'node1', capabilities)
     self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
Example #3
 def test_image_properties_filter_passes_without_hyper_version(self):
     img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
         hw_architecture=obj_fields.Architecture.X86_64,
         img_hv_type=hv_type.KVM,
         hw_vm_mode=vm_mode.HVM,
         img_hv_requested_version='>=6.0'))
     spec_obj = objects.RequestSpec(image=img_props)
     capabilities = {
         'enabled': True,
         'supported_instances':
             [(obj_fields.Architecture.X86_64, hv_type.KVM, vm_mode.HVM)],
     }
     host = fakes.FakeHostState('host1', 'node1', capabilities)
     self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
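Example #4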
 def test_image_properties_filter_fails_partial_inst_props(self):
     img_props = objects.ImageMeta(
         properties=objects.ImageMetaProps(
             hw_architecture=obj_fields.Architecture.X86_64,
             hw_vm_mode=obj_fields.VMMode.HVM))
     hypervisor_version = versionutils.convert_version_to_int('6.0.0')
     spec_obj = objects.RequestSpec(image=img_props)
     capabilities = {
         'supported_instances': [(
             obj_fields.Architecture.X86_64,
             obj_fields.HVType.XEN,
             obj_fields.VMMode.XEN)],
         'hypervisor_version': hypervisor_version}
     host = fakes.FakeHostState('host1', 'node1', capabilities)
     self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
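Example #5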
 def test_image_properties_filter_xen_hv_type_compat(self):
     # if an old image has 'xapi' for hv_type it should be treated as xen
     img_props = objects.ImageMeta(
         properties=objects.ImageMetaProps(
             img_hv_type='xapi'))
     hypervisor_version = versionutils.convert_version_to_int('6.0.0')
     spec_obj = objects.RequestSpec(image=img_props)
     capabilities = {
         'supported_instances': [(
             obj_fields.Architecture.I686,
             obj_fields.HVType.XEN,
             obj_fields.VMMode.HVM)],
         'hypervisor_version': hypervisor_version}
     host = fakes.FakeHostState('host1', 'node1', capabilities)
     self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
Example #6
 def _do_test_isolated_hosts(
         self,
         host_in_list,
         image_in_list,
         set_flags=True,
         restrict_isolated_hosts_to_isolated_images=True):
     if set_flags:
         self.flags(isolated_images=[uuids.image_ref],
                    isolated_hosts=['isolated_host'],
                    restrict_isolated_hosts_to_isolated_images=
                    restrict_isolated_hosts_to_isolated_images)
     host_name = 'isolated_host' if host_in_list else 'free_host'
     image_ref = uuids.image_ref if image_in_list else uuids.fake_image_ref
     spec_obj = objects.RequestSpec(image=objects.ImageMeta(id=image_ref))
     host = fakes.FakeHostState(host_name, 'node', {})
     return self.filt_cls.host_passes(host, spec_obj)
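
A brief usage sketch (hypothetical test names, not from the source): because the helper returns the filter verdict, each host/image combination reduces to a one-line assertion.

 def test_isolated_image_on_isolated_host(self):
     # Isolated image on an isolated host: allowed.
     self.assertTrue(self._do_test_isolated_hosts(True, True))

 def test_isolated_image_on_regular_host_fails(self):
     # Isolated image on a non-isolated host: rejected when
     # restrict_isolated_hosts_to_isolated_images is True.
     self.assertFalse(self._do_test_isolated_hosts(False, True))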
Example #7
 def test_aggregate_image_properties_isolation_props_namespace(
         self, agg_mock):
     self.flags(aggregate_image_properties_isolation_namespace='hw',
                group='filter_scheduler')
     self.flags(aggregate_image_properties_isolation_separator='_',
                group='filter_scheduler')
     agg_mock.return_value = {
         'hw_vm_mode': set(['hvm']),
         'img_owner_id': set(['foo'])
     }
     spec_obj = objects.RequestSpec(
         context=mock.sentinel.ctx,
         image=objects.ImageMeta(properties=objects.ImageMetaProps(
             hw_vm_mode='hvm', img_owner_id='wrong')))
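     # Only properties under the configured 'hw' namespace are checked
     # against the aggregate metadata; img_owner_id falls outside it, so
     # its mismatch is ignored and the host still passes.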
     host = fakes.FakeHostState('host1', 'compute', {})
     self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
Example #8
    def test_require_image_type_support_adds_trait(self, mock_log):
        self.flags(query_placement_for_image_type_support=True,
                   group='scheduler')
        reqspec = objects.RequestSpec(
            image=objects.ImageMeta(disk_format='raw'),
            flavor=objects.Flavor(extra_specs={}),
            is_bfv=False)
        # Assert that we add the trait to the flavor as required
        request_filter.require_image_type_support(self.context, reqspec)
        self.assertEqual({'trait:COMPUTE_IMAGE_TYPE_RAW': 'required'},
                         reqspec.flavor.extra_specs)
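        # The trait is added to the in-memory extra_specs only; no changes
        # should be tracked on the flavor, so nothing gets persisted.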
        self.assertEqual(set(), reqspec.flavor.obj_what_changed())

        log_lines = [c[0][0] for c in mock_log.debug.call_args_list]
        self.assertIn('added required trait', log_lines[0])
        self.assertIn('took %.1f seconds', log_lines[1])
Example #9
 def test_image_properties_filter_passes_same_inst_props_and_version(self):
     img_props = objects.ImageMeta(properties=objects.ImageMetaProps(
         hw_architecture=obj_fields.Architecture.X86_64,
         img_hv_type=hv_type.KVM,
         hw_vm_mode=vm_mode.HVM,
         img_hv_requested_version='>=6.0,<6.2'))
     spec_obj = objects.RequestSpec(image=img_props)
     hypervisor_version = versionutils.convert_version_to_int('6.0.0')
     capabilities = {
         'supported_instances':
             [(obj_fields.Architecture.X86_64, hv_type.KVM, vm_mode.HVM)],
         'hypervisor_version': hypervisor_version,
     }
     host = fakes.FakeHostState('host1', 'node1', capabilities)
     self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
Example #10
    def _do_test_numa_topology_filter_cpu_policy(
            self, numa_topology, cpu_policy, cpu_thread_policy, passes,
            mock_pinning_requested):
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]),
                                            memory=512)])
        spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                       pci_requests=None,
                                       instance_uuid=uuids.fake)

        extra_specs = [
            {},
            {
                'hw:cpu_policy': cpu_policy,
                'hw:cpu_thread_policy': cpu_thread_policy,
            }
        ]
        image_props = [
            {},
            {
                'hw_cpu_policy': cpu_policy,
                'hw_cpu_thread_policy': cpu_thread_policy,
            }
        ]
        host = fakes.FakeHostState('host1', 'node1', {
            'numa_topology': numa_topology,
            'pci_stats': None,
            'cpu_allocation_ratio': 1,
            'ram_allocation_ratio': 1.5})
        assertion = self.assertTrue if passes else self.assertFalse

        # test combinations of image properties and extra specs
        for specs, props in itertools.product(extra_specs, image_props):
            # ...except for the one where no policy is specified
            if specs == props == {}:
                continue

            fake_flavor = objects.Flavor(memory_mb=1024, extra_specs=specs)
            fake_image_props = objects.ImageMetaProps(**props)
            fake_image = objects.ImageMeta(properties=fake_image_props)

            spec_obj.image = fake_image
            spec_obj.flavor = fake_flavor

            assertion(self.filt_cls.host_passes(host, spec_obj))
            self.assertIsNone(spec_obj.numa_topology.cells[0].cpu_pinning)
Example #11
    def test_schedule_not_all_instance_clean_claimed(self, mock_get_hosts,
            mock_get_all_states, mock_claim, mock_cleanup, mock_get_by_uuid,
            mock_get_by_instance_uuid):
        """Tests that we clean up previously-allocated instances if not all
        instances could be scheduled.
        """
        spec_obj = objects.RequestSpec(
            num_instances=2,
            flavor=objects.Flavor(memory_mb=512,
                                  root_gb=512,
                                  ephemeral_gb=0,
                                  swap=0,
                                  vcpus=1,
                                  extra_specs={}),
            project_id=uuids.project_id,
            instance_group=None,
            instance_uuid='00000000-aaaa-bbbb-cccc-000000000000',
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            numa_topology=None)

        host_state = mock.Mock(spec=host_manager.HostState,
            host=mock.sentinel.host, uuid=uuids.cn1, numa_topology=None)
        all_host_states = [host_state]
        mock_get_all_states.return_value = all_host_states
        mock_get_hosts.side_effect = [
            all_host_states,  # first return all the hosts (only one)
            [],  # then act as if no more hosts were found that meet criteria
        ]
        mock_claim.return_value = True

        instance_uuids = [uuids.instance1, uuids.instance2]
        allocs = [{
            'allocations': [
                {'resources': {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 512},
                 'resource_provider': {'uuid': uuids.cn1}},
            ],
        }]
        alloc_reqs_by_rp_uuid = {
            uuids.cn1: allocs,
        }
        ctx = mock.Mock()
        self.driver._schedule(ctx, spec_obj, instance_uuids,
            alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)

        # Ensure we cleaned up the first successfully-claimed instance
        mock_cleanup.assert_called_once_with([uuids.instance1])
Example #12
 def test_isolate_agg_trait_on_flavor_destination_not_set(self,
                                                          mock_getnotmd):
     mock_getnotmd.return_value = []
     traits = set(['HW_GPU_API_DXVA', 'HW_NIC_DCB_ETS'])
     fake_flavor = objects.Flavor(
         vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
         extra_specs={'trait:' + trait: 'required' for trait in traits})
     fake_image = objects.ImageMeta(
         properties=objects.ImageMetaProps(
             traits_required=[]))
     reqspec = objects.RequestSpec(flavor=fake_flavor, image=fake_image)
     result = request_filter.isolate_aggregates(self.context, reqspec)
     self.assertTrue(result)
     self.assertNotIn('requested_destination', reqspec)
     keys = ['trait:%s' % trait for trait in traits]
     mock_getnotmd.assert_called_once_with(
         self.context, utils.ItemsMatcher(keys), 'trait:', value='required')
Example #13
    def setUp(self):
        super(MigrationTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        # Normally RequestContext.cell_uuid would be set when targeting
        # the context in nova.conductor.manager.targets_cell but we just
        # fake it here.
        self.context.cell_uuid = uuids.cell1
        self.flavor = fake_flavor.fake_flavor_obj(self.context)
        self.flavor.extra_specs = {'extra_specs': 'fake'}
        inst = fake_instance.fake_db_instance(image_ref='image_ref',
                                              instance_type=self.flavor)
        inst_object = objects.Instance(
            flavor=self.flavor,
            numa_topology=None,
            pci_requests=None,
            system_metadata={'image_hw_disk_bus': 'scsi'})
        self.instance = objects.Instance._from_db_object(
            self.context, inst_object, inst, [])
        self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
        self.host_lists = [[
            objects.Selection(service_host="host1",
                              nodename="node1",
                              cell_uuid=uuids.cell1)
        ]]
        self.filter_properties = {
            'limits': {},
            'retry': {
                'num_attempts': 1,
                'hosts': [['host1', 'node1']]
            }
        }
        self.reservations = []
        self.clean_shutdown = True

        _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
        self.heal_reqspec_is_bfv_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch('nova.objects.RequestSpec.ensure_network_information')
        self.ensure_network_information_mock = _p.start()
        self.addCleanup(_p.stop)

        self.mock_network_api = mock.Mock()
Example #14
 def test_select_destination_with_4_3_client(self, mock_get_ac, mock_rfrs):
     fake_spec = objects.RequestSpec(
         flavor=objects.Flavor(vcpus=1, extra_specs={}),
         image=objects.ImageMeta(properties=objects.ImageMetaProps()),
         numa_topology=None)
     place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
     mock_get_ac.return_value = place_res
     expected_alloc_reqs_by_rp_uuid = {
         cn.uuid: [fakes.ALLOC_REQS[x]]
         for x, cn in enumerate(fakes.COMPUTE_NODES)
     }
     with mock.patch.object(self.manager.driver,
                            'select_destinations') as select_destinations:
         self.manager.select_destinations(None, spec_obj=fake_spec)
         select_destinations.assert_called_once_with(
             None, fake_spec, None, expected_alloc_reqs_by_rp_uuid,
             mock.sentinel.p_sums)
         mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
Example #15
    def test_require_image_type_support_adds_trait(self, mock_log):
        self.flags(query_placement_for_image_type_support=True,
                   group='scheduler')
        reqspec = objects.RequestSpec(
            image=objects.ImageMeta(disk_format='raw'),
            flavor=objects.Flavor(extra_specs={}),
            is_bfv=False)
        self.assertEqual(0, len(reqspec.root_required))
        self.assertEqual(0, len(reqspec.root_forbidden))

        # Request filter puts the trait into the request spec
        request_filter.require_image_type_support(self.context, reqspec)
        self.assertEqual({ot.COMPUTE_IMAGE_TYPE_RAW}, reqspec.root_required)
        self.assertEqual(0, len(reqspec.root_forbidden))

        log_lines = [c[0][0] for c in mock_log.debug.call_args_list]
        self.assertIn('added required trait', log_lines[0])
        self.assertIn('took %.1f seconds', log_lines[1])
Example #16
 def _test_select_destination(self, get_allocation_candidates_response,
                              mock_get_ac, mock_rfrs, mock_get_res,
                              mock_get_all):
     fake_spec = objects.RequestSpec(
         flavor=objects.Flavor(vcpus=1, extra_specs={}),
         image=objects.ImageMeta(properties=objects.ImageMetaProps()),
         numa_topology=None)
     fake_spec.instance_uuid = uuids.instance
     place_res = get_allocation_candidates_response
     mock_get_ac.return_value = place_res
     with mock.patch.object(self.manager.driver,
                            'select_destinations') as select_destinations:
         self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
                           self.manager.select_destinations,
                           None,
                           spec_obj=fake_spec,
                           instance_uuids=[fake_spec.instance_uuid])
         select_destinations.assert_not_called()
         mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
Example #17
 def test_transform_image_metadata(self):
     self.flags(image_metadata_prefilter=True, group='scheduler')
     properties = objects.ImageMetaProps(
         hw_disk_bus=objects.fields.DiskBus.SATA,
         hw_cdrom_bus=objects.fields.DiskBus.IDE,
         hw_video_model=objects.fields.VideoModel.QXL,
         hw_vif_model=network_model.VIF_MODEL_VIRTIO)
     reqspec = objects.RequestSpec(
         image=objects.ImageMeta(properties=properties),
         flavor=objects.Flavor(extra_specs={}),
     )
     self.assertTrue(request_filter.transform_image_metadata(None, reqspec))
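     # The prefilter translates each image property into a COMPUTE_*
     # placement trait and records the set in reqspec.root_required.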
     expected = {
         'COMPUTE_GRAPHICS_MODEL_QXL',
         'COMPUTE_NET_VIF_MODEL_VIRTIO',
         'COMPUTE_STORAGE_BUS_IDE',
         'COMPUTE_STORAGE_BUS_SATA',
     }
     self.assertEqual(expected, reqspec.root_required)
Example #18
    def setUp(self):
        super(MigrationTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        self.flavor = fake_flavor.fake_flavor_obj(self.context)
        self.flavor.extra_specs = {'extra_specs': 'fake'}
        inst = fake_instance.fake_db_instance(image_ref='image_ref',
                                              instance_type=self.flavor)
        inst_object = objects.Instance(
            flavor=self.flavor,
            numa_topology=None,
            pci_requests=None,
            system_metadata={'image_hw_disk_bus': 'scsi'})
        self.instance = objects.Instance._from_db_object(
            self.context, inst_object, inst, [])
        self.request_spec = objects.RequestSpec(
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            flavor=self.flavor)
        self.request_spec.instance_group = None
        self.hosts = [dict(host='host1', nodename=None, limits={})]
        self.filter_properties = {
            'limits': {},
            'retry': {
                'num_attempts': 1,
                'hosts': [['host1', None]]
            }
        }

        self.instance_group = objects.InstanceGroup()
        self.instance_group['metadetails'] = {
            'wrs-sg:best_effort': 'false',
            'wrs-sg:group_size': '2'
        }
        self.instance_group['members'] = ['uuid1', 'uuid2']
        self.instance_group['hosts'] = ['compute1', 'compute2']
        self.instance_group['policies'] = ['anti-affinity']

        self.reservations = []
        self.clean_shutdown = True
Example #19
    def test_to_legacy_request_spec_dict(self, image_to_primitive,
                                         spec_to_legacy_instance):
        fake_image_dict = mock.Mock()
        image_to_primitive.return_value = fake_image_dict
        fake_instance = {
            'root_gb': 1.0,
            'ephemeral_gb': 1.0,
            'memory_mb': 1.0,
            'vcpus': 1,
            'numa_topology': None,
            'pci_requests': None,
            'project_id': fakes.FAKE_PROJECT_ID,
            'availability_zone': 'nova',
            'uuid': '1'
        }
        spec_to_legacy_instance.return_value = fake_instance

        fake_flavor = objects.Flavor(root_gb=10,
                                     ephemeral_gb=0,
                                     memory_mb=512,
                                     vcpus=1)
        spec = objects.RequestSpec(
            num_instances=1,
            image=objects.ImageMeta(),
            # instance properties
            numa_topology=None,
            pci_requests=None,
            project_id=1,
            availability_zone='nova',
            instance_uuid=uuids.instance,
            flavor=fake_flavor)
        spec_dict = spec.to_legacy_request_spec_dict()
        expected = {
            'num_instances': 1,
            'image': fake_image_dict,
            'instance_properties': fake_instance,
            'instance_type': fake_flavor
        }
        self.assertEqual(expected, spec_dict)
Example #20
 def setUp(self):
     super(MigrationTaskTestCase, self).setUp()
     self.user_id = 'fake'
     self.project_id = 'fake'
     self.context = FakeContext(self.user_id, self.project_id)
     self.flavor = fake_flavor.fake_flavor_obj(self.context)
     self.flavor.extra_specs = {'extra_specs': 'fake'}
     inst = fake_instance.fake_db_instance(image_ref='image_ref',
                                           instance_type=self.flavor)
     inst_object = objects.Instance(
         flavor=self.flavor,
         numa_topology=None,
         pci_requests=None,
         system_metadata={'image_hw_disk_bus': 'scsi'})
     self.instance = objects.Instance._from_db_object(
         self.context, inst_object, inst, [])
     self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
     self.hosts = [dict(host='host1', nodename=None, limits={})]
     self.filter_properties = {
         'limits': {},
         'retry': {
             'num_attempts': 1,
             'hosts': [['host1', None]]
         }
     }
     self.reservations = []
     self.clean_shutdown = True
Example #21
 def test_isolate_aggregates_union(self, mock_getnotmd):
     agg_traits = {
         'trait:HW_GPU_API_DXVA': 'required',
         'trait:CUSTOM_XYZ_TRAIT': 'required'
     }
     mock_getnotmd.return_value = [
         objects.Aggregate(uuid=uuids.agg2,
                           metadata={
                               'trait:CUSTOM_WINDOWS_LICENSED_TRAIT':
                               'required',
                               'trait:CUSTOM_XYZ_TRAIT': 'required'
                           }),
         objects.Aggregate(uuid=uuids.agg4,
                           metadata={
                               'trait:HW_GPU_API_DXVA': 'required',
                               'trait:HW_NIC_DCB_ETS': 'required'
                           }),
     ]
     fake_flavor = objects.Flavor(vcpus=1,
                                  memory_mb=1024,
                                  root_gb=10,
                                  ephemeral_gb=5,
                                  swap=0,
                                  extra_specs=agg_traits)
     fake_image = objects.ImageMeta(properties=objects.ImageMetaProps(
         traits_required=[]))
     reqspec = objects.RequestSpec(flavor=fake_flavor, image=fake_image)
     reqspec.requested_destination = objects.Destination(
         forbidden_aggregates={uuids.agg1})
     result = request_filter.isolate_aggregates(self.context, reqspec)
     self.assertTrue(result)
     self.assertEqual(
         ','.join(sorted([uuids.agg1, uuids.agg2, uuids.agg4])), ','.join(
             sorted(reqspec.requested_destination.forbidden_aggregates)))
     mock_getnotmd.assert_called_once_with(self.context,
                                           utils.ItemsMatcher(agg_traits),
                                           'trait:',
                                           value='required')
Example #22
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destination(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)

        # WRS: Determine resources consumed for the placement candidate check.
        vcpus = spec_obj.flavor.vcpus
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # WRS: The request_spec may carry stale numa_topology, so it must be
        # refreshed here. The topology can go stale if we evacuate or
        # live-migrate after a resize.
        instance_type = spec_obj.flavor
        image_meta = objects.ImageMeta(properties=image_props)
        try:
            spec_obj.numa_topology = \
                hardware.numa_get_constraints(instance_type, image_meta)
        except Exception as ex:
            LOG.error("Cannot get numa constraints, error=%(err)r",
                      {'err': ex})

        instance_numa_topology = spec_obj.numa_topology
        # WRS: If cpu_thread_policy is ISOLATE and compute has hyperthreading
        # enabled, vcpus claim will be double flavor.vcpus.  Since we don't
        # know the compute node at this point, we'll just request flavor.vcpus
        # and let the numa_topology filter sort this out.
        numa_cell = objects.NUMACell(siblings=[])
        numa_topology = objects.NUMATopology(cells=[numa_cell])._to_json()
        computenode = objects.ComputeNode(numa_topology=numa_topology)
        normalized_resources = utils.normalized_resources_for_placement_claim(
            resources, computenode, vcpus, extra_specs, image_props,
            instance_numa_topology)

        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(
                normalized_resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")

                # Determine the rejection reasons for all hosts based on
                # placement vcpu, memory, and disk criteria. This is done
                # after-the-fact since the placement query does not return
                # any reasons.
                reasons = self.placement_client.get_rejection_reasons(
                    requested=normalized_resources)
                if reasons is None:
                    reasons = {}

                # Populate per-host rejection map based on placement criteria.
                host_states = self.driver.host_manager.get_all_host_states(
                    ctxt)
                for host_state in host_states:
                    if host_state.uuid in reasons:
                        msg = reasons[host_state.uuid]
                        if msg:
                            nova_utils.filter_reject('Placement',
                                                     host_state,
                                                     spec_obj,
                                                     msg,
                                                     append=False)

                reason = 'Placement service found no hosts.'
                filter_properties = spec_obj.to_legacy_filter_properties_dict()
                utils.NoValidHost_extend(filter_properties, reason=reason)
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                alloc_reqs_by_rp_uuid,
                                                provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
Example #23
 def test_image_meta_object_passed(self):
     image_meta = objects.ImageMeta()
     claim = self._claim(image_meta=image_meta)
     self.assertIsInstance(claim.image_meta, objects.ImageMeta)
Example #24
 def _test_resources_from_request_spec(self, expected, flavor, image=None):
     # Avoid a shared mutable default argument: build a fresh ImageMeta
     # for each call instead.
     if image is None:
         image = objects.ImageMeta()
     fake_spec = objects.RequestSpec(flavor=flavor, image=image)
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertResourceRequestsEqual(expected, resources)
     return resources
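
A hedged usage sketch (illustrative values, not from the source): with no overriding extra specs, the flavor should translate directly into VCPU, MEMORY_MB, and DISK_GB amounts, with root and ephemeral disk summed into DISK_GB.

 def test_resources_from_flavor_only(self):
     flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
                             ephemeral_gb=5, swap=0, extra_specs={})
     # DISK_GB = root_gb (10) + ephemeral_gb (5) + ceil(swap MB -> GB) (0)
     expected = {'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 15}
     self._test_resources_from_request_spec(expected, flavor)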
Example #25
    def test_with_tenant_and_az_and_traits(self, mock_getmd, mock_getnotmd):
        mock_getmd.side_effect = [
            # Tenant filter
            [
                objects.Aggregate(uuid=uuids.agg1,
                                  metadata={'filter_tenant_id': 'owner'}),
                objects.Aggregate(uuid=uuids.agg2,
                                  metadata={'filter_tenant_id:12': 'owner'}),
                objects.Aggregate(uuid=uuids.agg3,
                                  metadata={'other_key': 'owner'})
            ],
            # AZ filter
            [
                objects.Aggregate(uuid=uuids.agg4,
                                  metadata={'availability_zone': 'myaz'})
            ],
        ]

        mock_getnotmd.side_effect = [
            # isolate_aggregates filter
            [
                objects.Aggregate(uuid=uuids.agg1,
                                  metadata={
                                      'trait:CUSTOM_WINDOWS_LICENSED_TRAIT':
                                      'required'
                                  }),
                objects.Aggregate(uuid=uuids.agg2,
                                  metadata={
                                      'trait:CUSTOM_WINDOWS_LICENSED_TRAIT':
                                      'required',
                                      'trait:CUSTOM_XYZ_TRAIT': 'required'
                                  }),
                objects.Aggregate(
                    uuid=uuids.agg3,
                    metadata={'trait:CUSTOM_XYZ_TRAIT': 'required'}),
            ],
        ]

        traits = set(['HW_GPU_API_DXVA', 'HW_NIC_DCB_ETS'])
        fake_flavor = objects.Flavor(
            vcpus=1,
            memory_mb=1024,
            root_gb=10,
            ephemeral_gb=5,
            swap=0,
            extra_specs={'trait:' + trait: 'required'
                         for trait in traits})
        fake_image = objects.ImageMeta(properties=objects.ImageMetaProps(
            traits_required=[]))
        reqspec = objects.RequestSpec(project_id='owner',
                                      availability_zone='myaz',
                                      flavor=fake_flavor,
                                      image=fake_image)
        request_filter.process_reqspec(self.context, reqspec)
        self.assertEqual(
            ','.join(sorted([uuids.agg1, uuids.agg2])), ','.join(
                sorted(
                    reqspec.requested_destination.aggregates[0].split(','))))
        self.assertEqual(
            ','.join(sorted([uuids.agg4])), ','.join(
                sorted(
                    reqspec.requested_destination.aggregates[1].split(','))))
        self.assertCountEqual(
            set([uuids.agg1, uuids.agg2, uuids.agg3]),
            reqspec.requested_destination.forbidden_aggregates)
        mock_getmd.assert_has_calls([
            mock.call(self.context, value='owner'),
            mock.call(self.context, key='availability_zone', value='myaz')
        ])

        keys = ['trait:%s' % trait for trait in traits]
        mock_getnotmd.assert_called_once_with(self.context,
                                              utils.ItemsMatcher(keys),
                                              'trait:',
                                              value='required')
Example #26
    def _test_schedule_provider_network(self, scheduler_hints, mock_get_all,
                                        mock_by_host, mock_get_by_binary,
                                        mock_get_all_states):
        vif_dict = dict(vif_model='virtio')
        fake_vif1 = fake_network_cache_model.new_vif(vif_dict)
        fake_vif2 = fake_network_cache_model.new_vif(vif_dict)
        fake_vif2['network']['id'] = 2
        fake_vif3 = fake_network_cache_model.new_vif(vif_dict)
        fake_vif3['network']['id'] = 3
        fake_vif4 = fake_network_cache_model.new_vif(vif_dict)
        fake_vif4['network']['id'] = 4
        fake_nw_info = network_model.NetworkInfo([fake_vif1, fake_vif2,
                                                  fake_vif3, fake_vif4])
        fake_info_cache = objects.InstanceInfoCache(network_info=fake_nw_info)
        fake_inst = objects.Instance(info_cache=fake_info_cache)
        fake_build_req = objects.BuildRequest(instance=fake_inst)

        @staticmethod
        def _fake_get_by_instance_uuid(context, instance_uuid):
            return fake_build_req

        @staticmethod
        def _fake_get_by_uuid(context, instance_uuid):
            return fake_inst

        self.stub_out('nova.objects.BuildRequest.get_by_instance_uuid',
                      _fake_get_by_instance_uuid)
        self.stub_out('nova.objects.Instance.get_by_uuid', _fake_get_by_uuid)

        def _fake_net_get_dict(context, network_uuid):
            if network_uuid == 1:
                return {'provider:physical_network': 'physnet0'}
            elif network_uuid == 2:
                return {'provider:physical_network': 'physnet1'}
            elif network_uuid == 3:
                return {'provider:physical_network': 'physnet1'}
            else:
                return {}

        from nova.network.neutronv2 import api as neutronapi
        self.driver.network_api = neutronapi.API()
        self.stubs.Set(self.driver.network_api, 'get_dict',
                       _fake_net_get_dict)

        spec_obj = objects.RequestSpec(
            num_instances=1,
            flavor=objects.Flavor(memory_mb=512,
                                  root_gb=512,
                                  ephemeral_gb=0,
                                  vcpus=1,
                                  extra_specs={}),
            project_id=1,
            os_type='Linux',
            instance_uuid='00000000-aaaa-bbbb-cccc-000000000000',
            pci_requests=None,
            numa_topology=None,
            instance_group=None,
            scheduler_hints=scheduler_hints,
            # WRS extension
            display_name='fake-vm',
            name='instance-00000001',
            image=objects.ImageMeta(properties=objects.ImageMetaProps()))

        host_state = mock.Mock(spec=host_manager.HostState,
            host=mock.sentinel.host, uuid=uuids.cn1, numa_topology=None)
        all_host_states = [host_state]
        mock_get_all_states.return_value = all_host_states

        instance_uuids = [uuids.instance]
        allocs = [{
            'allocations': [
                {'resources': {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 512},
                 'resource_provider': {'uuid': uuids.cn1}},
            ],
        }]
        alloc_reqs_by_rp_uuid = {
            uuids.cn1: allocs,
        }

        with mock.patch.object(self.driver.host_manager,
                               'get_filtered_hosts') as mock_get_hosts:
            mock_get_hosts.side_effect = fake_get_filtered_hosts
            hosts = self.driver._schedule(self.context, spec_obj,
                instance_uuids, alloc_reqs_by_rp_uuid,
                mock.sentinel.provider_summaries)

        self.assertEqual(len(hosts), 1)

        return spec_obj
Example #27
    def test_schedule_instance_group(self, mock_get_hosts,
            mock_get_all_states, mock_claim, mock_get_by_uuid,
            mock_get_by_instance_uuid):
        """Test that, since the request spec contains an instance group
        object, choosing a host in the primary scheduling loop updates the
        request spec's instance group information.
        """
        num_instances = 2
        ig = objects.InstanceGroup(hosts=[])
        spec_obj = objects.RequestSpec(
            num_instances=num_instances,
            flavor=objects.Flavor(memory_mb=512,
                                  root_gb=512,
                                  ephemeral_gb=0,
                                  swap=0,
                                  vcpus=1,
                                  extra_specs={}),
            project_id=uuids.project_id,
            instance_group=ig,
            instance_uuid='00000000-aaaa-bbbb-cccc-000000000000',
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            numa_topology=None)

        hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
            uuid=uuids.cn1, numa_topology=None)
        hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
            uuid=uuids.cn2, numa_topology=None)
        all_host_states = [hs1, hs2]
        mock_get_all_states.return_value = all_host_states
        mock_claim.return_value = True

        resources = {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 512}
        allocs1 = [{
            'allocations': [
                {'resources': resources,
                 'resource_provider': {'uuid': uuids.cn1}},
            ],
        }]
        allocs2 = [{
            'allocations': [
                {'resources': resources,
                 'resource_provider': {'uuid': uuids.cn2}},
            ],
        }]

        alloc_reqs_by_rp_uuid = {
            uuids.cn1: allocs1,
            uuids.cn2: allocs2,
        }

        # Simulate host 1 and host 2 being randomly returned first by
        # _get_sorted_hosts() in the two iterations for each instance in
        # num_instances
        mock_get_hosts.side_effect = ([hs2, hs1], [hs1, hs2])
        instance_uuids = [
            getattr(uuids, 'instance%d' % x) for x in range(num_instances)
        ]
        ctx = mock.Mock()
        self.driver._schedule(ctx, spec_obj, instance_uuids,
            alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)

        # Check that we called _claim_resources() for both the first and second
        # host state
        claim_calls = [
            mock.call(ctx.elevated.return_value, spec_obj,
                uuids.instance0, allocs2),
            mock.call(ctx.elevated.return_value, spec_obj,
                uuids.instance1, allocs1),
        ]
        mock_claim.assert_has_calls(claim_calls)

        # Check that _get_sorted_hosts() is called twice and that the
        # second time, we pass it the hosts that were returned from
        # _get_sorted_hosts() the first time
        sorted_host_calls = [
            mock.call(spec_obj, all_host_states, 0),
            mock.call(spec_obj, [hs2, hs1], 1),
        ]
        mock_get_hosts.assert_has_calls(sorted_host_calls)

        # The instance group object should have both host1 and host2 in its
        # instance group hosts list and there should not be any "changes" to
        # save in the instance group object
        self.assertEqual(['host2', 'host1'], ig.hosts)
        self.assertEqual({}, ig.obj_get_changes())
Example #28
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#


from nova import objects
from nova.scheduler.filters import core_filter
from nova import test
from nova.tests.unit.scheduler import fakes

FLAVOR_1 = objects.Flavor(vcpus=1, extra_specs={})
FLAVOR_2 = objects.Flavor(vcpus=2, extra_specs={})
IMAGE_PROPS = objects.ImageMeta(properties=objects.ImageMetaProps())


class TestCoreFilter(test.NoDBTestCase):

    def test_core_filter_passes(self):
        self.filt_cls = core_filter.CoreFilter()
        spec_obj = objects.RequestSpec(flavor=objects.Flavor(vcpus=1,
                                                             extra_specs={}),
                                       image=IMAGE_PROPS)
        cpu_allocation_ratio = 2
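        # With a 2x allocation ratio, the 4 physical CPUs give a limit of
        # 8 vCPUs; 3.5 are already in use, so this 1-vCPU request fits.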
        host = fakes.FakeHostState('host1', 'node1',
                {'vcpus_total': 4, 'vcpus_used': 7 / cpu_allocation_ratio,
                 'cpu_allocation_ratio': cpu_allocation_ratio})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
Example #29
    def __init__(self, request_spec):
        """Create a new instance of ResourceRequest from a RequestSpec.

        Examines the flavor, flavor extra specs, and (optional) image metadata
        of the provided ``request_spec``.

        For extra specs, items of the following form are examined:

        - ``resources:$RESOURCE_CLASS``: $AMOUNT
        - ``resources$N:$RESOURCE_CLASS``: $AMOUNT
        - ``trait:$TRAIT_NAME``: "required"
        - ``trait$N:$TRAIT_NAME``: "required"

        .. note::

            This does *not* yet handle ``member_of[$N]``.

        For image metadata, traits are extracted from the ``traits_required``
        property, if present.

        For the flavor, ``VCPU``, ``MEMORY_MB`` and ``DISK_GB`` are calculated
        from Flavor properties, though these are only used if they aren't
        overridden by flavor extra specs.

        :param request_spec: An instance of ``objects.RequestSpec``.
        """
        # { ident: RequestGroup }
        self._rg_by_id = {}
        self._group_policy = None
        # Default to the configured limit but _limit can be
        # set to None to indicate "no limit".
        self._limit = CONF.scheduler.max_placement_results

        # TODO(efried): Handle member_of[$N], which will need to be reconciled
        # with destination.aggregates handling in resources_from_request_spec

        image = (request_spec.image if 'image' in request_spec else
                 objects.ImageMeta(properties=objects.ImageMetaProps()))

        # Parse the flavor extra specs
        self._process_extra_specs(request_spec.flavor)

        self.numbered_groups_from_flavor = self.get_num_of_numbered_groups()

        # Now parse the (optional) image metadata
        self._process_image_meta(image)

        # Finally, parse the flavor itself, though we'll only use these fields
        # if they don't conflict with something already provided by the flavor
        # extra specs. These are all added to the unnumbered request group.
        merged_resources = self.merged_resources()

        if orc.VCPU not in merged_resources:
            self._add_resource(None, orc.VCPU, request_spec.vcpus)

        if orc.MEMORY_MB not in merged_resources:
            self._add_resource(None, orc.MEMORY_MB, request_spec.memory_mb)

        if orc.DISK_GB not in merged_resources:
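            # Boot-from-volume servers get their root disk from the volume
            # service, so root_gb only counts toward DISK_GB when not BFV.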
            disk = request_spec.ephemeral_gb
            disk += compute_utils.convert_mb_to_ceil_gb(request_spec.swap)
            if 'is_bfv' not in request_spec or not request_spec.is_bfv:
                disk += request_spec.root_gb

            if disk:
                self._add_resource(None, orc.DISK_GB, disk)

        self._translate_memory_encryption(request_spec.flavor, image)

        self.strip_zeros()
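
A hedged sketch of the extra-spec forms the docstring describes (the flavor values and CUSTOM_* names below are illustrative, not from the source): un-suffixed items land in the unnumbered request group, while a numeric suffix such as ``resources1:`` opens a separate numbered group.

flavor = objects.Flavor(
    vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
    extra_specs={
        'resources:VCPU': '2',                   # overrides flavor.vcpus
        'trait:HW_CPU_X86_AVX2': 'required',     # unnumbered group trait
        'resources1:CUSTOM_MAGIC_DEVICE': '1',   # opens numbered group 1
        'trait1:CUSTOM_MAGIC_FAST': 'required',  # trait for group 1
    })
spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
request = ResourceRequest(spec)
# VCPU comes from the extra spec (2), not vcpus=1; MEMORY_MB and DISK_GB
# fall back to the flavor since no extra spec overrides them.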