Example #1
def replace_all(context, alloc_list):
    """Replace the supplied allocations.

    :note: This method always deletes all allocations for all consumers
           referenced in the list of Allocation objects and then replaces
           the consumers' allocations with the Allocation objects. In doing
           so, it will end up setting the Allocation.id attribute of each
           Allocation object.
    """
    # Retry _set_allocations server side if there is a
    # ResourceProviderConcurrentUpdateDetected. We don't care about
    # sleeping; we simply want to reset the resource provider objects
    # and try again. For the sake of simplicity (and because we don't have
    # easy access to the information) we reload all the resource
    # providers that may be present.
    retries = context.config.placement.allocation_conflict_retry_count
    while retries:
        retries -= 1
        try:
            _set_allocations(context, alloc_list)
            break
        except exception.ResourceProviderConcurrentUpdateDetected:
            LOG.debug('Retrying allocations write on resource provider '
                      'generation conflict')
            # We only want to reload each unique resource provider once.
            alloc_rp_uuids = set(alloc.resource_provider.uuid
                                 for alloc in alloc_list)
            seen_rps = {}
            for rp_uuid in alloc_rp_uuids:
                # NOTE(melwitt): We use a separate database transaction to read
                # the resource provider because we might be wrapped in an outer
                # database transaction when we reach here. We want to get an
                # up-to-date generation value in case a racing request has
                # changed it after we began an outer transaction and this is
                # the first time we are reading the resource provider records
                # during our transaction.
                db_context_manager = db_api.placement_context_manager
                with db_context_manager.reader.independent.using(context):
                    seen_rps[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(
                        context, rp_uuid)
            for alloc in alloc_list:
                rp_uuid = alloc.resource_provider.uuid
                alloc.resource_provider = seen_rps[rp_uuid]
    else:
        # We ran out of retries so we need to raise again.
        # The log will automatically have request id info associated with
        # it that will allow tracing back to specific allocations.
        # Attempting to extract specific consumer or resource provider
        # information from the allocations is not coherent as this
        # could be multiple consumers and providers.
        LOG.warning('Exceeded retry limit of %d on allocations write',
                    context.config.placement.allocation_conflict_retry_count)
        raise exception.ResourceProviderConcurrentUpdateDetected()
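
The loop above leans on Python's while/else: the else body runs only when the loop finishes without hitting break, which here means every attempt conflicted. Below is a minimal, self-contained sketch of the same pattern; the names Conflict, do_write and refresh are illustrative stand-ins, not placement APIs.

class Conflict(Exception):
    """Stands in for ResourceProviderConcurrentUpdateDetected."""


def write_with_retries(do_write, refresh, retries=3):
    while retries:
        retries -= 1
        try:
            do_write()
            break  # success: skip the else clause below
        except Conflict:
            refresh()  # reload any stale state before the next attempt
    else:
        # Runs only if we never broke out, i.e. every attempt conflicted.
        # Note that retries=0 skips the loop entirely and raises at once,
        # which is the behavior the retry_count=0 case in Example #4 asserts.
        raise Conflict()
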
Example #2
def replace_all(context, alloc_list):
    """Replace the supplied allocations.

    :note: This method always deletes all allocations for all consumers
           referenced in the list of Allocation objects and then replaces
           the consumers' allocations with the Allocation objects. In doing
           so, it will end up setting the Allocation.id attribute of each
           Allocation object.
    """
    # Retry _set_allocations server side if there is a
    # ResourceProviderConcurrentUpdateDetected. We don't care about
    # sleeping; we simply want to reset the resource provider objects
    # and try again. For the sake of simplicity (and because we don't have
    # easy access to the information) we reload all the resource
    # providers that may be present.
    retries = RP_CONFLICT_RETRY_COUNT
    while retries:
        retries -= 1
        try:
            _set_allocations(context, alloc_list)
            break
        except exception.ResourceProviderConcurrentUpdateDetected:
            LOG.debug("Retrying allocations write on resource provider "
                      "generation conflict")
            # We only want to reload each unique resource provider once.
            alloc_rp_uuids = set(alloc.resource_provider.uuid
                                 for alloc in alloc_list)
            seen_rps = {}
            for rp_uuid in alloc_rp_uuids:
                seen_rps[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(
                    context, rp_uuid)
            for alloc in alloc_list:
                rp_uuid = alloc.resource_provider.uuid
                alloc.resource_provider = seen_rps[rp_uuid]
    else:
        # We ran out of retries so we need to raise again.
        # The log will automatically have request id info associated with
        # it that will allow tracing back to specific allocations.
        # Attempting to extract specific consumer or resource provider
        # information from the allocations is not coherent as this
        # could be multiple consumers and providers.
        LOG.warning("Exceeded retry limit of %d on allocations write",
                    RP_CONFLICT_RETRY_COUNT)
        raise exception.ResourceProviderConcurrentUpdateDetected()
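
The only material difference from Example #1 is where the retry count comes from: a module-level RP_CONFLICT_RETRY_COUNT constant here, versus the [placement]/allocation_conflict_retry_count configuration option there. As a rough sketch, such an option could be registered with oslo.config along the following lines; the default value and help text are assumptions, not necessarily what placement ships.

from oslo_config import cfg

placement_opts = [
    cfg.IntOpt('allocation_conflict_retry_count',
               default=10,  # assumed default; check placement's conf module
               min=0,
               help='Number of times to retry, server side, writing '
                    'allocations when there is a resource provider '
                    'generation conflict.'),
]


def register_opts(conf):
    # Makes the value readable as
    # conf.placement.allocation_conflict_retry_count, as in Example #1.
    conf.register_opts(placement_opts, group='placement')
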
Example #3
    def increment_generation(self):
        """Increments this provider's generation value, supplying the
        currently-known generation.

        :raises placement.exception.ResourceProviderConcurrentUpdateDetected:
                if another thread updated the resource provider's view of its
                inventory or allocations between the time this object was
                originally read and this attempt to increment the generation.
        """
        rp_gen = self.generation
        new_generation = rp_gen + 1
        upd_stmt = _RP_TBL.update().where(sa.and_(
            _RP_TBL.c.id == self.id,
            _RP_TBL.c.generation == rp_gen)).values(
            generation=new_generation)

        res = self._context.session.execute(upd_stmt)
        if res.rowcount != 1:
            raise exception.ResourceProviderConcurrentUpdateDetected()
        self.generation = new_generation
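
The guarded UPDATE is the core of placement's optimistic concurrency control: the WHERE clause pins the row to the generation this object read earlier, so a rowcount other than one means another writer got there first. A standalone sketch of the same compare-and-swap pattern follows; the table definition and session handling are illustrative, not placement's actual schema access.

import sqlalchemy as sa

metadata = sa.MetaData()
resource_providers = sa.Table(
    'resource_providers', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('generation', sa.Integer, nullable=False),
)


class ConcurrentUpdateDetected(Exception):
    pass


def bump_generation(session, rp_id, known_generation):
    # Compare-and-swap: only update the row if nobody has incremented
    # the generation since we last read it.
    stmt = (resource_providers.update()
            .where(sa.and_(
                resource_providers.c.id == rp_id,
                resource_providers.c.generation == known_generation))
            .values(generation=known_generation + 1))
    res = session.execute(stmt)
    if res.rowcount != 1:
        # Zero rows matched: a concurrent writer changed the generation.
        raise ConcurrentUpdateDetected()
    return known_generation + 1
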
Example #4
    def test_set_allocations_retry(self, mock_log):
        """Test server side allocation write retry handling."""

        # Create a single resource provider and give it some inventory.
        rp1 = self._create_provider('rp1')
        tb.add_inventory(rp1, orc.VCPU, 24,
                         allocation_ratio=16.0)
        tb.add_inventory(rp1, orc.MEMORY_MB, 1024,
                         min_unit=64,
                         max_unit=1024,
                         step_size=64)
        original_generation = rp1.generation
        # Verify the generation is what we expect (we'll be checking again
        # later).
        self.assertEqual(2, original_generation)

        # Create a consumer and have it make an allocation.
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.MEMORY_MB,
                used=1024)
        ]

        # Make sure the right exception happens when the retry loop expires.
        self.conf_fixture.config(allocation_conflict_retry_count=0,
                                 group='placement')
        self.assertRaises(
            exception.ResourceProviderConcurrentUpdateDetected,
            alloc_obj.replace_all, self.ctx, alloc_list)
        mock_log.warning.assert_called_with(
            'Exceeded retry limit of %d on allocations write', 0)

        # Make sure the right thing happens after a small number of failures.
        # There's a bit of mock magic going on here to ensure that we can
        # both do some side effects on _set_allocations as well as have the
        # real behavior. Two generation conflicts and then a success.
        mock_log.reset_mock()
        self.conf_fixture.config(allocation_conflict_retry_count=3,
                                 group='placement')
        unmocked_set = alloc_obj._set_allocations
        with mock.patch('placement.objects.allocation.'
                        '_set_allocations') as mock_set:
            exceptions = iter([
                exception.ResourceProviderConcurrentUpdateDetected(),
                exception.ResourceProviderConcurrentUpdateDetected(),
            ])

            def side_effect(*args, **kwargs):
                try:
                    raise next(exceptions)
                except StopIteration:
                    return unmocked_set(*args, **kwargs)

            mock_set.side_effect = side_effect
            alloc_obj.replace_all(self.ctx, alloc_list)
            self.assertEqual(2, mock_log.debug.call_count)
            mock_log.debug.assert_called_with(
                'Retrying allocations write on resource provider '
                'generation conflict')
            self.assertEqual(3, mock_set.call_count)

        # Confirm we're using a different rp object after the change
        # and that it has a higher generation.
        new_rp = alloc_list[0].resource_provider
        self.assertEqual(original_generation, rp1.generation)
        self.assertEqual(original_generation + 1, new_rp.generation)
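
The side_effect closure in this test is a reusable mock idiom: exhaust an iterator of exceptions, then fall through to the real implementation. A compact, runnable version of the same idea is sketched below; fail_then_delegate is a hypothetical helper name, not part of the placement test suite.

from unittest import mock


def fail_then_delegate(real_fn, *errors):
    """Build a side_effect that raises each error once, then calls real_fn."""
    pending = iter(errors)

    def side_effect(*args, **kwargs):
        try:
            raise next(pending)
        except StopIteration:
            # Iterator exhausted: delegate to the real implementation.
            return real_fn(*args, **kwargs)

    return side_effect


if __name__ == '__main__':
    calls = []
    stub = mock.Mock(side_effect=fail_then_delegate(
        calls.append, ValueError(), ValueError()))
    for _ in range(3):
        try:
            stub('payload')
        except ValueError:
            pass
    assert calls == ['payload']  # only the third call reached the real fn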