Example #1
    def test_resources_from_request_spec_aggregates(self):
        destination = objects.Destination()
        flavor = objects.Flavor(vcpus=1,
                                memory_mb=1024,
                                root_gb=1,
                                ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor,
                                      requested_destination=destination)

        destination.require_aggregates(['foo', 'bar'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([('foo', 'bar')],
                         req.get_request_group(None).member_of)

        destination.require_aggregates(['baz'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([('foo', 'bar'), ('baz',)],
                         req.get_request_group(None).member_of)

        # Test stringification
        self.assertEqual(
            'RequestGroup(use_same_provider=False, '
            'resources={DISK_GB:1, MEMORY_MB:1024, VCPU:1}, '
            'traits=[], '
            'aggregates=[[baz], [foo, bar]])', str(req))
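Each tuple asserted in member_of above is one OR group; placement ANDs the groups together. As a hedged aside (plain Python, not part of the test), this is how such groups are conventionally encoded in a placement query string:

from urllib.parse import urlencode

# Hedged illustration: OR within a group via the 'in:' prefix, AND across
# groups via repeated member_of parameters.
groups = [('foo', 'bar'), ('baz',)]
params = [('member_of', 'in:%s' % ','.join(g) if len(g) > 1 else g[0])
          for g in groups]
print(urlencode(params))  # member_of=in%3Afoo%2Cbar&member_of=baz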
Example #2
def require_tenant_aggregate(ctxt, request_spec):
    """Require hosts in an aggregate based on tenant id.

    This will modify request_spec to request hosts in an aggregate
    defined specifically for the tenant making the request. We do that
    by looking for a nova host aggregate with metadata indicating which
    tenant it is for, and passing that aggregate uuid to placement to
    limit results accordingly.
    """

    enabled = CONF.scheduler.limit_tenants_to_placement_aggregate
    agg_required = CONF.scheduler.placement_aggregate_required_for_tenants
    if not enabled:
        return

    aggregates = objects.AggregateList.get_by_metadata(
        ctxt, value=request_spec.project_id)
    aggregate_uuids_for_tenant = set()
    for agg in aggregates:
        for key in agg.metadata:
            if key.startswith(TENANT_METADATA_KEY):
                aggregate_uuids_for_tenant.add(agg.uuid)
                break

    if aggregate_uuids_for_tenant:
        if ('requested_destination' not in request_spec
                or request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()
        destination = request_spec.requested_destination
        destination.require_aggregates(aggregate_uuids_for_tenant)
    elif agg_required:
        LOG.warning('Tenant %(tenant)s has no available aggregates',
                    {'tenant': request_spec.project_id})
        raise exception.RequestFilterFailed(
            reason=_('No hosts available for tenant'))
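A hedged sketch of this filter in action, assuming the surrounding nova imports (objects), a RequestContext in ctxt, the oslo uuidsentinel fixture, and that TENANT_METADATA_KEY is the aggregate metadata prefix matched in the loop above:

from unittest import mock

from oslo_utils.fixture import uuidsentinel as uuids

# Hedged sketch: stub the aggregate lookup and observe the effect on the
# RequestSpec; 'ctxt' stands in for a real RequestContext.
reqspec = objects.RequestSpec(project_id='my-project')
agg = objects.Aggregate(uuid=uuids.agg1,
                        metadata={TENANT_METADATA_KEY: 'my-project'})
with mock.patch.object(objects.AggregateList, 'get_by_metadata',
                       return_value=[agg]):
    require_tenant_aggregate(ctxt, reqspec)
# The tenant is now pinned to its aggregate:
# reqspec.requested_destination.aggregates == [uuids.agg1]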
Example #3
    def test_from_components(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        destination = objects.Destination(host='foo')
        instance = fake_instance.fake_instance_obj(ctxt)
        image = {
            'id': uuids.image_id,
            'properties': {
                'mappings': []
            },
            'status': 'fake-status',
            'location': 'far-away'
        }
        flavor = fake_flavor.fake_flavor_obj(ctxt)
        filter_properties = {'requested_destination': destination}
        instance_group = None

        spec = objects.RequestSpec.from_components(
            ctxt, instance.uuid, image, flavor, instance.numa_topology,
            instance.pci_requests, filter_properties, instance_group,
            instance.availability_zone, objects.SecurityGroupList())
        # Make sure that all fields are set using that helper method
        skip = ['id', 'network_metadata', 'is_bfv']
        for field in [f for f in spec.obj_fields if f not in skip]:
            self.assertTrue(spec.obj_attr_is_set(field),
                            'Field: %s is not set' % field)
        # just making sure that the context is set by the method
        self.assertEqual(ctxt, spec._context)
        self.assertEqual(destination, spec.requested_destination)
Example #4
    def test_from_primitives_with_requested_destination(self):
        destination = objects.Destination(host='foo')
        spec_dict = {}
        filt_props = {'requested_destination': destination}
        ctxt = context.RequestContext('fake', 'fake')
        spec = objects.RequestSpec.from_primitives(ctxt, spec_dict,
                                                   filt_props)
        self.assertEqual(destination, spec.requested_destination)
Example #5
def map_az_to_placement_aggregate(ctxt, request_spec):
    """Map requested nova availability zones to placement aggregates.

    This will modify request_spec to request hosts in an aggregate that
    matches the desired AZ of the user's request.
    """
    if not CONF.scheduler.query_placement_for_availability_zone:
        return False

    az_hint = request_spec.availability_zone
    if not az_hint:
        return False

    aggregates = objects.AggregateList.get_by_metadata(ctxt,
                                                       key='availability_zone',
                                                       value=az_hint)
    if aggregates:
        if ('requested_destination' not in request_spec
                or request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()
        agg_uuids = [agg.uuid for agg in aggregates]
        request_spec.requested_destination.require_aggregates(agg_uuids)
        LOG.debug(
            'map_az_to_placement_aggregate request filter added '
            'aggregates %s for az %r', ','.join(agg_uuids), az_hint)

    return True
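In the same hedged spirit (ctxt again stands in for a RequestContext), a sketch of the effect when two aggregates carry the requested AZ; per the Destination semantics asserted in Example #30 below, both land in a single comma-joined OR group:

from unittest import mock

from oslo_utils.fixture import uuidsentinel as uuids

# Hedged sketch: two aggregates tagged with the requested AZ become one
# OR group, so placement may pick hosts from either.
reqspec = objects.RequestSpec(availability_zone='az1')
aggs = [objects.Aggregate(uuid=uuids.agg1,
                          metadata={'availability_zone': 'az1'}),
        objects.Aggregate(uuid=uuids.agg2,
                          metadata={'availability_zone': 'az1'})]
with mock.patch.object(objects.AggregateList, 'get_by_metadata',
                       return_value=aggs):
    assert map_az_to_placement_aggregate(ctxt, reqspec)
# reqspec.requested_destination.aggregates ==
#     ['%s,%s' % (uuids.agg1, uuids.agg2)]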
Example #6
    def test_isolate_aggregates_union(self, mock_getnotmd):
        agg_traits = {'trait:HW_GPU_API_DXVA': 'required',
                      'trait:CUSTOM_XYZ_TRAIT': 'required'}
        mock_getnotmd.return_value = [
            objects.Aggregate(
                uuid=uuids.agg2,
                metadata={'trait:CUSTOM_WINDOWS_LICENSED_TRAIT': 'required',
                          'trait:CUSTOM_XYZ_TRAIT': 'required'}),
            objects.Aggregate(
                uuid=uuids.agg4,
                metadata={'trait:HW_GPU_API_DXVA': 'required',
                          'trait:HW_NIC_DCB_ETS': 'required'}),
        ]
        fake_flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
            extra_specs=agg_traits)
        fake_image = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                traits_required=[]))
        reqspec = objects.RequestSpec(flavor=fake_flavor, image=fake_image)
        reqspec.requested_destination = objects.Destination(
            forbidden_aggregates={uuids.agg1})
        result = request_filter.isolate_aggregates(self.context, reqspec)
        self.assertTrue(result)
        self.assertEqual(
            ','.join(sorted([uuids.agg1, uuids.agg2, uuids.agg4])),
            ','.join(sorted(
                reqspec.requested_destination.forbidden_aggregates)))
        mock_getnotmd.assert_called_once_with(
            self.context, utils.ItemsMatcher(agg_traits), 'trait:',
            value='required')
Example #7
    def _execute(self):
        # TODO(sbauza): Remove this once prep_resize() accepts a RequestSpec
        # object in the signature and all the scheduler.utils methods do too
        legacy_spec = self.request_spec.to_legacy_request_spec_dict()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        scheduler_utils.setup_instance_group(self.context, self.request_spec)
        scheduler_utils.populate_retry(legacy_props,
                                       self.instance.uuid)

        # NOTE(sbauza): Force_hosts/nodes need to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        self.request_spec.reset_forced_destinations()

        # NOTE(danms): Right now we only support migrating to the same
        # cell as the current instance, so request that the scheduler
        # limit thusly.
        instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
            self.context, self.instance.uuid)
        LOG.debug('Requesting cell %(cell)s while migrating',
                  {'cell': instance_mapping.cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in self.request_spec and
                self.request_spec.requested_destination):
            self.request_spec.requested_destination.cell = (
                instance_mapping.cell_mapping)
        else:
            self.request_spec.requested_destination = objects.Destination(
                cell=instance_mapping.cell_mapping)

        migration = self._preallocate_migration()

        hosts = self.scheduler_client.select_destinations(
            self.context, self.request_spec, [self.instance.uuid])
        host_state = hosts[0]

        scheduler_utils.populate_filter_properties(legacy_props,
                                                   host_state)
        # context is not serializable
        legacy_props.pop('context', None)

        (host, node) = (host_state['host'], host_state['nodename'])

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, host))

        # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
        # oslo.messaging #1529084 to transform datetime values into strings.
        # tl;dr: datetimes in dicts are not accepted as correct values by the
        # rpc fake driver.
        legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

        # RPC cast to the destination host to start the migration process.
        self.compute_rpcapi.prep_resize(
            self.context, self.instance, legacy_spec['image'],
            self.flavor, host, migration, self.reservations,
            request_spec=legacy_spec, filter_properties=legacy_props,
            node=node, clean_shutdown=self.clean_shutdown)
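The FIXME above relies on oslo's jsonutils converting datetime values to strings during a dumps/loads round trip; a hedged standalone illustration (the exact string format is oslo's choice):

import datetime

from oslo_serialization import jsonutils

# Hedged illustration: the datetime survives the round trip only as a
# string, which is what the RPC fake driver can accept.
d = {'created_at': datetime.datetime(2024, 1, 1, 12, 0, 0)}
roundtripped = jsonutils.loads(jsonutils.dumps(d))
print(type(roundtripped['created_at']))  # <class 'str'>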
Example #8
    def test_get_weighed_hosts_allow_cross_cell_move_false(self):
        """Tests the scenario where the source cell is set in the requested
        destination but it's not a cross-cell move, so the weights should
        all be 0.0.
        """
        request_spec = objects.RequestSpec(
            requested_destination=objects.Destination(
                cell=objects.CellMapping(uuid=uuids.cell1)))
        weighed_hosts = self._get_weighed_hosts(request_spec)
        self.assertTrue(all([wh.weight == 0.0 for wh in weighed_hosts]))
Example #9
    def test_routed_networks_filter_not_enabled(self):
        self.assertIn(request_filter.routed_networks_filter,
                      request_filter.ALL_REQUEST_FILTERS)
        self.flags(query_placement_for_routed_network_aggregates=False,
                   group='scheduler')
        reqspec = objects.RequestSpec(
            requested_destination=objects.Destination())
        self.assertFalse(request_filter.routed_networks_filter(
            self.context, reqspec))
        # We don't add any aggregates
        self.assertIsNone(reqspec.requested_destination.aggregates)
Example #10
    def test_get_weighed_hosts_no_requested_destination_or_cell(self):
        """Weights should all be 0.0 given there is no requested_destination
        or source cell in the RequestSpec, e.g. initial server create
        scenario.
        """
        # Test the requested_destination field not being set.
        request_spec = objects.RequestSpec()
        weighed_hosts = self._get_weighed_hosts(request_spec)
        self.assertTrue(all([wh.weight == 0.0 for wh in weighed_hosts]))
        # Test the requested_destination field being set to None.
        request_spec.requested_destination = None
        weighed_hosts = self._get_weighed_hosts(request_spec)
        self.assertTrue(all([wh.weight == 0.0 for wh in weighed_hosts]))
        # Test the requested_destination field being set but without the
        # cell field set.
        request_spec.requested_destination = objects.Destination()
        weighed_hosts = self._get_weighed_hosts(request_spec)
        self.assertTrue(all([wh.weight == 0.0 for wh in weighed_hosts]))
        # Test the requested_destination field being set with the cell field
        # set but to None.
        request_spec.requested_destination = objects.Destination(cell=None)
        weighed_hosts = self._get_weighed_hosts(request_spec)
        self.assertTrue(all([wh.weight == 0.0 for wh in weighed_hosts]))
Example #11
    def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
        """Builds a RequestSpec that can be passed to select_destinations

        Used when calling the scheduler to pick a destination host for live
        migrating the instance.

        :param attempted_hosts: List of host names to ignore in the scheduler.
            This is generally at least seeded with the source host.
        :returns: nova.objects.RequestSpec object
        """
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to build one the old way.
            image = utils.get_image_from_system_metadata(
                self.instance.system_metadata)
            filter_properties = {'ignore_hosts': attempted_hosts}
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image,
                self.instance.flavor, self.instance.numa_topology,
                self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone
            )
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes need to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()
        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec and
                request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_and_user_id(self.instance)
        request_spec.ensure_network_metadata(self.instance)
        compute_utils.heal_reqspec_is_bfv(
            self.context, request_spec, self.instance)

        return request_spec
Example #12
    def test_set_requested_destination_cell_allow_cross_cell_resize_true(
            self, mock_debug, mock_get_im):
        """Tests the scenario where the RequestSpec is configured for
        allow_cross_cell_resize=True.
        """
        task = self._generate_task()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        self.request_spec.requested_destination = objects.Destination(
            allow_cross_cell_move=True)
        task._set_requested_destination_cell(legacy_props)
        mock_get_im.assert_called_once_with(self.context, self.instance.uuid)
        mock_debug.assert_called_once()
        self.assertIn('Allowing migration from cell',
                      mock_debug.call_args[0][0])
Example #13
    def test_set_requested_destination_cell_allow_cross_cell_resize_true_host(
            self, mock_debug, mock_get_im):
        """Tests the scenario where the RequestSpec is configured for
        allow_cross_cell_resize=True and there is a requested target host.
        """
        task = self._generate_task()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        self.request_spec.requested_destination = objects.Destination(
            allow_cross_cell_move=True, host='fake-host')
        task._set_requested_destination_cell(legacy_props)
        mock_get_im.assert_called_once_with(self.context, self.instance.uuid)
        mock_debug.assert_called_once()
        self.assertIn('Not restricting cell', mock_debug.call_args[0][0])
        self.assertIsNone(self.request_spec.requested_destination.cell)
Example #14
    def test_save(self):
        req_obj = fake_request_spec.fake_spec_obj()
        # Make sure the requested_destination is not persisted since it is
        # only valid per request/operation.
        req_obj.requested_destination = objects.Destination(host='fake')

        def _test_save_args(self2, context, instance_uuid, changes):
            self._check_update_primitive(req_obj, changes)
            # DB creation would have set an id
            changes['id'] = 42
            return changes

        with mock.patch.object(request_spec.RequestSpec, '_save_in_db',
                               _test_save_args):
            req_obj.save()
Example #15
    def test_request_single_cell(self):
        spec_obj = self._get_fake_request_spec()
        spec_obj.requested_destination = objects.Destination(
            cell=objects.CellMapping(uuid=uuids.cell2))
        host_states_cell1 = [self._get_fake_host_state(i)
                             for i in range(1, 5)]
        host_states_cell2 = [self._get_fake_host_state(i)
                             for i in range(5, 10)]

        self.driver.all_host_states = {
            uuids.cell1: host_states_cell1,
            uuids.cell2: host_states_cell2,
        }
        d = self.driver.select_destinations(self.context, spec_obj)
        self.assertIn(d[0]['host'], [hs.host for hs in host_states_cell2])
Example #16
    def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
        """Builds a RequestSpec that can be passed to select_destinations

        Used when calling the scheduler to pick a destination host for live
        migrating the instance.

        :param attempted_hosts: List of host names to ignore in the scheduler.
            This is generally at least seeded with the source host.
        :returns: nova.objects.RequestSpec object
        """
        request_spec = self.request_spec
        # NOTE(sbauza): Force_hosts/nodes need to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        request_spec.reset_forced_destinations()

        port_res_req, req_lvl_params = (
            self.network_api.get_requested_resource_for_instance(
                self.context, self.instance.uuid))
        # NOTE(gibi): When cyborg or other module wants to handle
        # similar non-nova resources then here we have to collect
        # all the external resource requests in a single list and
        # add them to the RequestSpec.
        request_spec.requested_resources = port_res_req
        request_spec.request_level_params = req_lvl_params

        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec
                and request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_and_user_id(self.instance)
        request_spec.ensure_network_information(self.instance)
        compute_utils.heal_reqspec_is_bfv(self.context, request_spec,
                                          self.instance)

        return request_spec
Example #17
    def test_get_weighed_hosts_allow_cross_cell_move_true_positive(self):
        """Tests a cross-cell move where the host in the source (preferred)
        cell should be weighed higher than the host in the other cell based
        on the default configuration.
        """
        request_spec = objects.RequestSpec(
            requested_destination=objects.Destination(
                cell=objects.CellMapping(uuid=uuids.cell1),
                allow_cross_cell_move=True))
        weighed_hosts = self._get_weighed_hosts(request_spec)
        multiplier = CONF.filter_scheduler.cross_cell_move_weight_multiplier
        self.assertEqual([multiplier, 0.0],
                         [wh.weight for wh in weighed_hosts])
        # host1 should be preferred since it's in cell1
        preferred_host = weighed_hosts[0]
        self.assertEqual('host1', preferred_host.obj.host)
Example #18
    def _set_requested_destination_cell(self, legacy_props):
        instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
            self.context, self.instance.uuid)
        if not ('requested_destination' in self.request_spec
                and self.request_spec.requested_destination):
            self.request_spec.requested_destination = objects.Destination()
        targeted = 'host' in self.request_spec.requested_destination
        # NOTE(mriedem): If the user is allowed to perform a cross-cell resize
        # then add the current cell to the request spec as "preferred" so the
        # scheduler will (by default) weigh hosts within the current cell over
        # hosts in another cell, all other things being equal. If the user is
        # not allowed to perform cross-cell resize, then we limit the request
        # spec and tell the scheduler to only look at hosts in the current
        # cell.
        cross_cell_allowed = (
            self.request_spec.requested_destination.allow_cross_cell_move)
        if targeted and cross_cell_allowed:
            # If a target host is specified it might be in another cell so
            # we cannot restrict the cell in this case. We would not prefer
            # the source cell in that case either since we know where the
            # user wants it to go. We just let the scheduler figure it out.
            self.request_spec.requested_destination.cell = None
        else:
            self.request_spec.requested_destination.cell = (
                instance_mapping.cell_mapping)

        # NOTE(takashin): In the case that the target host is specified,
        # if the migration fails, it is not necessary to retry
        # the cold migration to the same host. So make sure that
        # a reschedule will not occur.
        if targeted:
            legacy_props.pop('retry', None)
            self.request_spec.retry = None

        # Log our plan before calling the scheduler.
        if cross_cell_allowed and targeted:
            LOG.debug('Not restricting cell for targeted cold migration.',
                      instance=self.instance)
        elif cross_cell_allowed:
            LOG.debug('Allowing migration from cell %(cell)s',
                      {'cell': instance_mapping.cell_mapping.identity},
                      instance=self.instance)
        else:
            LOG.debug('Restricting to cell %(cell)s while migrating',
                      {'cell': instance_mapping.cell_mapping.identity},
                      instance=self.instance)
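Reduced to a hedged pure-Python helper (hypothetical, not nova code), the cell-restriction decision above looks like this:

# Hypothetical reduction of the branch logic above: which cell, if any,
# the request spec gets pinned to.
def pick_destination_cell(targeted, cross_cell_allowed, current_cell):
    if targeted and cross_cell_allowed:
        # The target host may be in another cell, so do not restrict;
        # the scheduler figures it out.
        return None
    # Otherwise pin to the current cell: preferred when cross-cell moves
    # are allowed, mandatory when they are not.
    return current_cell

assert pick_destination_cell(True, True, 'cell1') is None
assert pick_destination_cell(False, True, 'cell1') == 'cell1'
assert pick_destination_cell(True, False, 'cell1') == 'cell1'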
Example #19
    def test_resources_from_request_spec_aggregates(self):
        destination = objects.Destination()
        flavor = objects.Flavor(vcpus=1, memory_mb=1024,
                                root_gb=1, ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor,
                                      requested_destination=destination)

        destination.require_aggregates(['foo', 'bar'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([['foo', 'bar']],
                         req.get_request_group(None).aggregates)

        destination.require_aggregates(['baz'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([['foo', 'bar'], ['baz']],
                         req.get_request_group(None).aggregates)
Example #20
    def test_to_legacy_filter_properties_dict(self):
        fake_numa_limits = objects.NUMATopologyLimits()
        fake_computes_obj = objects.ComputeNodeList(objects=[
            objects.ComputeNode(host='fake1', hypervisor_hostname='node1')
        ])
        fake_dest = objects.Destination(host='fakehost')
        spec = objects.RequestSpec(
            ignore_hosts=['ignoredhost'],
            force_hosts=['fakehost'],
            force_nodes=['fakenode'],
            retry=objects.SchedulerRetries(num_attempts=1,
                                           hosts=fake_computes_obj),
            limits=objects.SchedulerLimits(numa_topology=fake_numa_limits,
                                           vcpu=1.0,
                                           disk_gb=10.0,
                                           memory_mb=8192.0),
            instance_group=objects.InstanceGroup(hosts=['fake1'],
                                                 policy='affinity',
                                                 members=['inst1', 'inst2']),
            scheduler_hints={'foo': ['bar']},
            requested_destination=fake_dest)
        expected = {
            'ignore_hosts': ['ignoredhost'],
            'force_hosts': ['fakehost'],
            'force_nodes': ['fakenode'],
            'retry': {
                'num_attempts': 1,
                'hosts': [['fake1', 'node1']]
            },
            'limits': {
                'numa_topology': fake_numa_limits,
                'vcpu': 1.0,
                'disk_gb': 10.0,
                'memory_mb': 8192.0
            },
            'group_updated': True,
            'group_hosts': set(['fake1']),
            'group_policies': set(['affinity']),
            'group_members': set(['inst1', 'inst2']),
            'scheduler_hints': {
                'foo': 'bar'
            },
            'requested_destination': fake_dest
        }
        self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
Example #21
    def test_get_weighed_hosts_allow_cross_cell_move_true_negative(self):
        """Tests a cross-cell move where the host in another cell should be
        weighed higher than the host in the source cell because the weight
        value is negative.
        """
        self.flags(cross_cell_move_weight_multiplier=-1000,
                   group='filter_scheduler')
        request_spec = objects.RequestSpec(
            requested_destination=objects.Destination(
                # cell1 is the source cell
                cell=objects.CellMapping(uuid=uuids.cell1),
                allow_cross_cell_move=True))
        weighed_hosts = self._get_weighed_hosts(request_spec)
        multiplier = CONF.filter_scheduler.cross_cell_move_weight_multiplier
        self.assertEqual([0.0, multiplier],
                         [wh.weight for wh in weighed_hosts])
        # host2 should be preferred since it's *not* in cell1
        preferred_host = weighed_hosts[0]
        self.assertEqual('host2', preferred_host.obj.host)
Example #22
    def test_request_single_cell(self):
        spec_obj = self._get_fake_request_spec()
        spec_obj.requested_destination = objects.Destination(
            cell=objects.CellMapping(uuid=uuids.cell2))
        host_states_cell1 = [self._get_fake_host_state(i)
                             for i in range(1, 5)]
        host_states_cell2 = [self._get_fake_host_state(i)
                             for i in range(5, 10)]

        self.driver.all_host_states = {
            uuids.cell1: host_states_cell1,
            uuids.cell2: host_states_cell2,
        }
        provider_summaries = {
            cn.uuid: cn for cn in host_states_cell1 + host_states_cell2
        }

        d = self.driver.select_destinations(self.context, spec_obj,
                [spec_obj.instance_uuid], {}, provider_summaries)
        self.assertIn(d[0].host, [hs.host for hs in host_states_cell2])
Example #23
    def test_resources_from_request_spec_no_aggregates(self):
        flavor = objects.Flavor(vcpus=1, memory_mb=1024,
                                root_gb=1, ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor)

        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination = None
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination = objects.Destination()
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination.aggregates = None
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).aggregates)
Example #24
    def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
        """Builds a RequestSpec that can be passed to select_destinations

        Used when calling the scheduler to pick a destination host for live
        migrating the instance.

        :param attempted_hosts: List of host names to ignore in the scheduler.
            This is generally at least seeded with the source host.
        :returns: nova.objects.RequestSpec object
        """
        request_spec = self.request_spec
        # NOTE(sbauza): Force_hosts/nodes need to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        request_spec.reset_forced_destinations()

        # TODO(gibi): We need to make sure that the requested_resources field
        # is recalculated based on neutron ports.
        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec
                and request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_and_user_id(self.instance)
        request_spec.ensure_network_metadata(self.instance)
        compute_utils.heal_reqspec_is_bfv(self.context, request_spec,
                                          self.instance)

        return request_spec
Example #25
    def test_process_use_requested_destination(self):
        fake_cell = objects.CellMapping(uuid=uuids.cell1, name='foo')
        destination = objects.Destination(host='fake-host',
                                          node='fake-node',
                                          cell=fake_cell)
        fake_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(host='fake-host',
                                uuid='12345678-1234-1234-1234-123456789012',
                                hypervisor_hostname='fake-node')
        ])
        self.mock_host_manager.get_compute_nodes_by_host_or_node.\
            return_value = fake_nodes
        flavor = objects.Flavor(vcpus=1,
                                memory_mb=1024,
                                root_gb=15,
                                ephemeral_gb=0,
                                swap=0)
        fake_spec = objects.RequestSpec(flavor=flavor,
                                        requested_destination=destination)
        expected = utils.ResourceRequest()
        expected._rg_by_id[None] = objects.RequestGroup(
            use_same_provider=False,
            resources={
                'VCPU': 1,
                'MEMORY_MB': 1024,
                'DISK_GB': 15,
            },
            in_tree='12345678-1234-1234-1234-123456789012',
        )
        resources = utils.resources_from_request_spec(self.context, fake_spec,
                                                      self.mock_host_manager)
        self.assertResourceRequestsEqual(expected, resources)
        expected_querystring = (
            'in_tree=12345678-1234-1234-1234-123456789012&'
            'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
        self.assertEqual(expected_querystring, resources.to_querystring())
        self.mock_host_manager.get_compute_nodes_by_host_or_node.\
            assert_called_once_with(
                self.context, 'fake-host', 'fake-node', cell=fake_cell)
Example #26
def map_az_to_placement_aggregate(ctxt, request_spec):
    """Map requested nova availability zones to placement aggregates.

    This will modify request_spec to request hosts in an aggregate that
    matches the desired AZ of the user's request.
    """
    if not CONF.scheduler.query_placement_for_availability_zone:
        return

    az_hint = request_spec.availability_zone
    if not az_hint:
        return

    aggregates = objects.AggregateList.get_by_metadata(ctxt,
                                                       key='availability_zone',
                                                       value=az_hint)
    if aggregates:
        if ('requested_destination' not in request_spec or
                request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()
        request_spec.requested_destination.require_aggregates(
            [agg.uuid for agg in aggregates])
Example #27
def isolate_aggregates(ctxt, request_spec):
    """Prepare list of aggregates that should be isolated.

    This filter will prepare the list of aggregates that should be
    ignored by the placement service. It checks if aggregates has metadata
    'trait:<trait_name>='required' and if <trait_name> is not present in
    either of flavor extra specs or image properties, then those aggregates
    will be included in the list of isolated aggregates.

    Precisely this filter gets the trait request form the image and
    flavor and unions them. Then it accumulates the set of aggregates that
    request traits are "non_matching_by_metadata_keys" and uses that to
    produce the list of isolated aggregates.
    """

    if not CONF.scheduler.enable_isolated_aggregate_filtering:
        return False

    # Get required traits set in flavor and image
    res_req = utils.ResourceRequest(request_spec)
    required_traits = res_req.all_required_traits

    keys = ['trait:%s' % trait for trait in required_traits]

    isolated_aggregates = (
        objects.aggregate.AggregateList.get_non_matching_by_metadata_keys(
            ctxt, keys, 'trait:', value='required'))

    # Set list of isolated aggregates to destination object of request_spec
    if isolated_aggregates:
        if ('requested_destination' not in request_spec
                or request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()

        destination = request_spec.requested_destination
        destination.append_forbidden_aggregates(agg.uuid
                                                for agg in isolated_aggregates)

    return True
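Example #6 above asserts that forbidden aggregates accumulate as a set union; a minimal hedged sketch (not the real Destination object) of that accumulation:

# Minimal sketch of the semantics asserted by Example #6: each call
# unions new UUIDs into a set, never dropping earlier entries.
class DestinationSketch:
    def __init__(self, forbidden_aggregates=None):
        self.forbidden_aggregates = forbidden_aggregates

    def append_forbidden_aggregates(self, aggregates):
        if self.forbidden_aggregates is None:
            self.forbidden_aggregates = set()
        self.forbidden_aggregates |= set(aggregates)

d = DestinationSketch(forbidden_aggregates={'agg1'})
d.append_forbidden_aggregates(['agg2', 'agg4'])
assert d.forbidden_aggregates == {'agg1', 'agg2', 'agg4'}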
Example #28
    def _restrict_request_spec_to_cell(self, legacy_props):
        # NOTE(danms): Right now we only support migrating to the same
        # cell as the current instance, so request that the scheduler
        # limit thusly.
        instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
            self.context, self.instance.uuid)
        LOG.debug('Requesting cell %(cell)s while migrating',
                  {'cell': instance_mapping.cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in self.request_spec and
                self.request_spec.requested_destination):
            self.request_spec.requested_destination.cell = (
                instance_mapping.cell_mapping)
            # NOTE(takashin): In the case that the target host is specified,
            # if the migration fails, it is not necessary to retry
            # the cold migration to the same host. So make sure that
            # a reschedule will not occur.
            if 'host' in self.request_spec.requested_destination:
                legacy_props.pop('retry', None)
                self.request_spec.retry = None
        else:
            self.request_spec.requested_destination = objects.Destination(
                cell=instance_mapping.cell_mapping)
Example #29
    def test_destination_1dotoh(self):
        destination = objects.Destination(aggregates=['foo'])
        primitive = destination.obj_to_primitive(target_version='1.0')
        self.assertNotIn('aggregates', primitive['nova_object.data'])
Example #30
    def test_destination_require_aggregates(self):
        destination = objects.Destination()
        destination.require_aggregates(['foo', 'bar'])
        destination.require_aggregates(['baz'])
        self.assertEqual(['foo,bar', 'baz'], destination.aggregates)
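The assertion above pins down the require_aggregates() contract: each call appends one comma-joined OR group. A minimal hedged sketch reproducing that behavior outside nova:

# Minimal sketch matching the test above: each call adds one OR group,
# stored as a comma-joined string; the scheduler ANDs the groups.
class DestinationSketch:
    def __init__(self):
        self.aggregates = None

    def require_aggregates(self, aggregates):
        if self.aggregates is None:
            self.aggregates = []
        self.aggregates.append(','.join(aggregates))

d = DestinationSketch()
d.require_aggregates(['foo', 'bar'])
d.require_aggregates(['baz'])
assert d.aggregates == ['foo,bar', 'baz']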