Example #1
def revert_allocation_for_migration(context, source_cn, instance, migration):
    """Revert an allocation made for a migration back to the instance."""

    reportclient = report.SchedulerReportClient()

    # FIXME(danms): This method is flawed in that it assumes allocations
    # against only one provider. So, this may overwrite allocations against
    # a shared provider, if we had one.
    success = reportclient.move_allocations(context, migration.uuid,
                                            instance.uuid)
    if not success:
        LOG.error(
            'Unable to replace resource claim on source '
            'host %(host)s node %(node)s for instance', {
                'host': instance.host,
                'node': instance.node
            },
            instance=instance)
    else:
        LOG.debug('Created allocations for instance %(inst)s on %(rp)s', {
            'inst': instance.uuid,
            'rp': source_cn.uuid
        })
Example #2
def revert_allocation_for_migration(context, source_cn, instance, migration):
    """Revert an allocation made for a migration back to the instance."""

    reportclient = report.SchedulerReportClient()

    # FIXME(gibi): This method is flawed in that it does not handle allocations
    # against sharing providers in any special way. This leads to duplicate
    # allocations against the sharing provider during migration.
    success = reportclient.move_allocations(context, migration.uuid,
                                            instance.uuid)
    if not success:
        LOG.error(
            'Unable to replace resource claim on source '
            'host %(host)s node %(node)s for instance', {
                'host': instance.host,
                'node': instance.node
            },
            instance=instance)
    else:
        LOG.debug('Created allocations for instance %(inst)s on %(rp)s', {
            'inst': instance.uuid,
            'rp': source_cn.uuid
        })
Example #3
    def test_get_aggregates_for_routed_network(self, mock_get_segment_ids):
        mock_get_segment_ids.return_value = [uuids.segment1, uuids.segment2]
        report_client = report.SchedulerReportClient()
        network_api = neutron.API()

        def fake_get_provider_aggregates(context, segment_id):
            agg = uuids.agg1 if segment_id == uuids.segment1 else uuids.agg2
            agg_info = report.AggInfo(aggregates=[agg], generation=1)
            return agg_info

        with mock.patch.object(
                report_client,
                '_get_provider_aggregates',
                side_effect=fake_get_provider_aggregates) as mock_get_aggs:
            res = scheduler_utils.get_aggregates_for_routed_network(
                self.context, network_api, report_client, uuids.network1)
        self.assertEqual([uuids.agg1, uuids.agg2], res)
        mock_get_segment_ids.assert_called_once_with(self.context,
                                                     uuids.network1)
        mock_get_aggs.assert_has_calls([
            mock.call(self.context, uuids.segment1),
            mock.call(self.context, uuids.segment2)
        ])
Example #4
    def test_claim_resources_on_destination(self):
        """Happy path test where everything is successful."""
        reportclient = report.SchedulerReportClient()
        instance = fake_instance.fake_instance_obj(
            nova_context.get_admin_context())
        source_node = objects.ComputeNode(
            uuid=uuids.source_node, host=instance.host)
        dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
        source_res_allocs = {
            'VCPU': instance.vcpus,
            'MEMORY_MB': instance.memory_mb,
            # This would really include ephemeral and swap too but we're lazy.
            'DISK_GB': instance.root_gb
        }
        dest_alloc_request = {
            'allocations': {
                uuids.dest_node: {
                    'resources': source_res_allocs
                }
            }
        }

        @mock.patch.object(reportclient,
                           'get_allocations_for_consumer_by_provider')
        @mock.patch.object(reportclient,
                           'claim_resources', return_value=True)
        def test(mock_claim, mock_get_allocs):
            utils.claim_resources_on_destination(
                reportclient, instance, source_node, dest_node,
                source_res_allocs)
            self.assertFalse(mock_get_allocs.called)
            mock_claim.assert_called_once_with(
                instance.uuid, dest_alloc_request,
                instance.project_id, instance.user_id,
                allocation_request_version='1.12')

        test()
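Note the pattern here: the mock decorators wrap the nested test() function
rather than the test method itself, so the patches on reportclient are active
only while test() runs.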
Example #5
    def test_claim_resources_on_destination_no_source_allocations(self):
        """Tests the negative scenario where the instance does not have
        allocations in Placement on the source compute node so no claim is
        attempted on the destination compute node.
        """
        reportclient = report.SchedulerReportClient()
        instance = fake_instance.fake_instance_obj(self.context)
        source_node = objects.ComputeNode(
            uuid=uuids.source_node, host=instance.host)
        dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')

        @mock.patch.object(reportclient,
                           'get_allocs_for_consumer',
                           return_value={})
        @mock.patch.object(reportclient,
                           'claim_resources',
                           new_callable=mock.NonCallableMock)
        def test(mock_claim, mock_get_allocs):
            utils.claim_resources_on_destination(
                self.context, reportclient, instance, source_node, dest_node)
            mock_get_allocs.assert_called_once_with(
                self.context, instance.uuid)

        test()
Example #6
    def test_get_aggregates_for_routed_subnet_fails(self,
                                                    mock_get_segment_ids):
        mock_get_segment_ids.return_value = uuids.segment1
        report_client = report.SchedulerReportClient()
        network_api = neutron.API()

        # We could fail on some placement issue...
        with mock.patch.object(report_client,
                               '_get_provider_aggregates',
                               return_value=None):
            self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
                              scheduler_utils.get_aggregates_for_routed_subnet,
                              self.context, network_api, report_client,
                              uuids.subnet1)

        # ... but we also want to fail if we can't find the related aggregate
        agg_info = report.AggInfo(aggregates=set(), generation=1)
        with mock.patch.object(report_client,
                               '_get_provider_aggregates',
                               return_value=agg_info):
            self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
                              scheduler_utils.get_aggregates_for_routed_subnet,
                              self.context, network_api, report_client,
                              uuids.subnet1)
Example #7
    def test_constructor(self, load_auth_mock, ks_sess_mock):
        report.SchedulerReportClient()

        load_auth_mock.assert_called_once_with(CONF, 'placement')
        ks_sess_mock.assert_called_once_with(auth=load_auth_mock.return_value)
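The decorators that produce load_auth_mock and ks_sess_mock are not part of
the excerpt. A minimal sketch of what they presumably look like, inferred
from the assertions (the exact patch targets are an assumption):

    from unittest import mock

    @mock.patch('keystoneauth1.session.Session')
    @mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
    def test_constructor(self, load_auth_mock, ks_sess_mock):
        # Decorators apply bottom-up: load_auth_mock patches the auth
        # loader, ks_sess_mock patches keystoneauth1's Session.
        report.SchedulerReportClient()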
Example #8
    def placementclient(self):
        if self._placementclient is None:
            self._placementclient = report.SchedulerReportClient()
        return self._placementclient
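This excerpt is a method shown without its class. A minimal sketch of the
surrounding context it assumes (the class name here is hypothetical; in Nova
this accessor is typically exposed as a property):

    from nova.scheduler.client import report

    class ClientWrapper:  # hypothetical name, for illustration only
        def __init__(self):
            self._placementclient = None

        @property
        def placementclient(self):
            # Build the report client lazily on first access, then cache it.
            if self._placementclient is None:
                self._placementclient = report.SchedulerReportClient()
            return self._placementclient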
Example #9
def replace_allocation_with_migration(context, instance, migration):
    """Replace instance's allocation with one for a migration.

    :raises: keystoneauth1.exceptions.base.ClientException on failure to
             communicate with the placement API
    :raises: ConsumerAllocationRetrievalFailed if reading the current
             allocation from placement fails
    :raises: ComputeHostNotFound if the host of the instance is not found in
             the database
    :raises: AllocationMoveFailed if moving the allocation from the
             instance.uuid to the migration.uuid fails due to parallel
             placement operation on the instance consumer
    :raises: NoValidHost if placement rejects the update for other reasons
             (e.g. not enough resources)
    :returns: (source_compute_node, migration_allocation)
    """
    try:
        source_cn = objects.ComputeNode.get_by_host_and_nodename(
            context, instance.host, instance.node)
    except exception.ComputeHostNotFound:
        LOG.error('Unable to find record for source '
                  'node %(node)s on %(host)s',
                  {'host': instance.host, 'node': instance.node},
                  instance=instance)
        # A generic error like this will just error out the migration
        # and do any rollback required
        raise

    reportclient = report.SchedulerReportClient()

    orig_alloc = reportclient.get_allocs_for_consumer(
        context, instance.uuid)['allocations']
    root_alloc = orig_alloc.get(source_cn.uuid, {}).get('resources', {})
    if not root_alloc:
        LOG.debug('Unable to find existing allocations for instance on '
                  'source compute node: %s. This is normal if you are not '
                  'using the FilterScheduler.', source_cn.uuid,
                  instance=instance)
        return None, None

    # FIXME(gibi): This method is flawed in that it does not handle allocations
    # against sharing providers in any special way. This leads to duplicate
    # allocations against the sharing provider during migration.
    success = reportclient.move_allocations(context, instance.uuid,
                                            migration.uuid)
    if not success:
        LOG.error('Unable to replace resource claim on source '
                  'host %(host)s node %(node)s for instance',
                  {'host': instance.host,
                   'node': instance.node},
                  instance=instance)
        # Mimic the "no space" error that could have come from the
        # scheduler. Once we have an atomic replace operation, this
        # would be a severe error.
        raise exception.NoValidHost(
            reason=_('Unable to replace instance claim on source'))
    else:
        LOG.debug('Created allocations for migration %(mig)s on %(rp)s',
                  {'mig': migration.uuid, 'rp': source_cn.uuid})

    return source_cn, orig_alloc
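Note that this function is the forward half of the pattern in Examples #1
and #2: here the allocation moves from instance.uuid to migration.uuid, and
revert_allocation_for_migration() moves it back if the migration is rolled
back.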
Example #10
    def _generate_task(self):
        self.task = live_migrate.LiveMigrationTask(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit, self.migration,
            compute_rpcapi.ComputeAPI(), servicegroup.API(),
            query.SchedulerQueryClient(), report.SchedulerReportClient(),
            self.fake_spec)
Example #11
    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.notifier = rpc.get_notifier('scheduler')
        self.placement_client = report.SchedulerReportClient()
Example #12
def routed_networks_filter(
    ctxt: nova_context.RequestContext,
    request_spec: 'objects.RequestSpec'
) -> bool:
    """Adds requested placement aggregates that match requested networks.

    This will modify request_spec to request hosts in aggregates that
    matches segment IDs related to requested networks.

    :param ctxt: The usual suspect for a context object.
    :param request_spec: a classic RequestSpec object containing the request.
    :returns: True if the filter was used or False if not.
    :raises: exception.InvalidRoutedNetworkConfiguration if something went
             wrong when trying to get the related segment aggregates.
    """
    if not CONF.scheduler.query_placement_for_routed_network_aggregates:
        return False

    # NOTE(sbauza): On a create operation with no specific network request, we
    # allocate the network only after scheduling when the nova-compute service
    # calls Neutron. In this case, we simply accept any destination here.
    # NOTE(sbauza): This could also be a reschedule coming from an old compute.
    if 'requested_networks' not in request_spec:
        return True

    # This object field is not nullable
    requested_networks = request_spec.requested_networks

    # NOTE(sbauza): This field could be not created yet.
    if (
        'requested_destination' not in request_spec or
        request_spec.requested_destination is None
    ):
        request_spec.requested_destination = objects.Destination()

    # Get the clients we need
    network_api = neutron.API()
    report_api = report.SchedulerReportClient()

    for requested_network in requested_networks:
        network_id = None
        # Reset per-iteration so a previous NIC's aggregates don't leak into
        # this one (and so the check below can't hit an unbound name).
        aggregates = None
        # Check for a specifically requested network ID.
        if "port_id" in requested_network and requested_network.port_id:
            # We have to lookup the port to see which segment(s) to support.
            port = network_api.show_port(ctxt, requested_network.port_id)[
                "port"
            ]
            if port['fixed_ips']:
                # The instance already exists with a related subnet. We need
                # to stick to this subnet.
                # NOTE(sbauza): With multiple IPs there could be more than one
                # subnet, but since they all belong to the same port, looking
                # at the first subnet is enough.
                subnet_id = port['fixed_ips'][0]['subnet_id']
                try:
                    aggregates = utils.get_aggregates_for_routed_subnet(
                        ctxt, network_api, report_api, subnet_id)
                except exception.InvalidRoutedNetworkConfiguration as e:
                    raise exception.RequestFilterFailed(
                        reason=_('Aggregates not found for the subnet %s'
                        ) % subnet_id) from e
            else:
                # The port was just created without a subnet.
                network_id = port["network_id"]
        elif (
            "network_id" in requested_network and requested_network.network_id
        ):
            network_id = requested_network.network_id

        if network_id:
            # As the user only requested a network or a port unbound to a
            # segment, we are free to choose any segment from the network.
            try:
                aggregates = utils.get_aggregates_for_routed_network(
                    ctxt, network_api, report_api, network_id)
            except exception.InvalidRoutedNetworkConfiguration as e:
                raise exception.RequestFilterFailed(
                    reason=_('Aggregates not found for the network %s'
                    ) % network_id) from e

        if aggregates:
            LOG.debug(
                'routed_networks_filter request filter added the following '
                'aggregates for network ID %s: %s',
                network_id, ', '.join(aggregates))
            # NOTE(sbauza): All of the aggregates from this request will be
            # accepted, but they will have an AND relationship with any other
            # requested aggregate, like for another NIC request in this loop.
            request_spec.requested_destination.require_aggregates(aggregates)

    return True
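As the NOTE above describes, each require_aggregates() call is ANDed with the
previous ones. A hedged illustration of that semantics (assuming each call
forms one OR-group, which this excerpt does not show):

    dest = objects.Destination()
    # The host must belong to agg1 OR agg2 ...
    dest.require_aggregates([uuids.agg1, uuids.agg2])
    # ... AND to agg3; a second call adds an ANDed requirement.
    dest.require_aggregates([uuids.agg3])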
Example #13
File: __init__.py  Project: weiqiLee/nova
    def __init__(self):
        self.queryclient = query.SchedulerQueryClient()
        self.reportclient = report.SchedulerReportClient()
Example #14
def _get_placement_usages(context: 'nova.context.RequestContext',
                          project_id: str) -> ty.Dict[str, int]:
    global PLACEMENT_CLIENT
    if not PLACEMENT_CLIENT:
        PLACEMENT_CLIENT = report.SchedulerReportClient()
    return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)
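The function relies on a module-level cache that the excerpt does not show.
Presumably it is declared along these lines (the annotation is an assumption):

    import typing as ty
    from nova.scheduler.client import report

    # Module-level singleton, created on first use by _get_placement_usages().
    PLACEMENT_CLIENT: ty.Optional['report.SchedulerReportClient'] = None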