Example #1
    def _execute(self):
        self._check_instance_is_active()
        self._check_host_is_up(self.source)

        if should_do_migration_allocation(self.context):
            self._source_cn, self._held_allocations = (
                # NOTE(danms): This may raise various exceptions, which will
                # propagate to the API and cause a 500. This is what we
                # want, as it would indicate internal data structure corruption
                # (such as missing migrations, compute nodes, etc).
                migrate.replace_allocation_with_migration(self.context,
                                                          self.instance,
                                                          self.migration))

        if not self.destination:
            # Either no host was specified in the API request and the user
            # wants the scheduler to pick a destination host, or a host was
            # specified but is not forcing it, so they want the scheduler
            # filters to run on the specified host, like a scheduler hint.
            self.destination, dest_node = self._find_destination()
        else:
            # This is the case that the user specified the 'force' flag when
            # live migrating with a specific destination host so the scheduler
            # is bypassed. There are still some minimal checks performed here
            # though.
            source_node, dest_node = self._check_requested_destination()
            # Now that we're semi-confident in the force specified host, we
            # need to copy the source compute node allocations in Placement
            # to the destination compute node. Normally select_destinations()
            # in the scheduler would do this for us, but when forcing the
            # target host we don't call the scheduler.
            # TODO(mriedem): In Queens, call select_destinations() with a
            # skip_filters=True flag so the scheduler does the work of claiming
            # resources on the destination in Placement but still bypass the
            # scheduler filters, which honors the 'force' flag in the API.
            # This raises NoValidHost which will be handled in
            # ComputeTaskManager.
            scheduler_utils.claim_resources_on_destination(
                self.context, self.scheduler_client.reportclient,
                self.instance, source_node, dest_node,
                source_node_allocations=self._held_allocations)

            # dest_node is a ComputeNode object, so we need to get the actual
            # node name off it to set in the Migration object below.
            dest_node = dest_node.hypervisor_hostname

        self.migration.source_node = self.instance.node
        self.migration.dest_node = dest_node
        self.migration.dest_compute = self.destination
        self.migration.save()

        # TODO(johngarbutt) need to move complexity out of compute manager
        # TODO(johngarbutt) disk_over_commit?
        return self.compute_rpcapi.live_migration(self.context,
                host=self.source,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=self.migrate_data)
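
All of the snippets on this page revolve around one helper: when the user forces a destination host, the scheduler never runs, so the conductor task itself has to mirror the instance's source-node allocations onto the destination in Placement. Below is a minimal sketch of what the helper plausibly does, reconstructed only from the call sites and test assertions shown here; the report-client method name and the request shape are assumptions, not the actual nova implementation:

def claim_resources_on_destination(context, reportclient, instance,
                                   source_node, dest_node,
                                   source_node_allocations=None):
    # If the caller does not hand over the allocations it already holds,
    # look them up. Example #2's test asserts exactly this lookup
    # (context, source node uuid, instance uuid) in that case.
    if not source_node_allocations:
        source_node_allocations = reportclient.get_allocations_for_consumer(
            context, source_node.uuid, instance.uuid)  # assumed method name

    # Re-issue the same resource amounts against the destination node.
    # Assumed request shape: the tests only show that claim_resources()
    # receives a dest_alloc_request plus the instance's project/user and
    # an allocation_request_version.
    dest_alloc_request = {
        'allocations': [{
            'resource_provider': {'uuid': dest_node.uuid},
            'resources': source_node_allocations,
        }],
    }
    reportclient.claim_resources(
        context, instance.uuid, dest_alloc_request,
        instance.project_id, instance.user_id,
        allocation_request_version='1.12')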
Example #2
 def test(mock_claim, mock_get_allocs):
     utils.claim_resources_on_destination(self.context, reportclient,
                                          instance, source_node,
                                          dest_node)
     mock_get_allocs.assert_called_once_with(self.context,
                                             uuids.source_node,
                                             instance.uuid)
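
The test excerpts on this page are nested functions: the mock_claim and mock_get_allocs parameters imply mock.patch decorators that the excerpts drop. A plausible reconstruction of the missing scaffolding, with the patched lookup method name being an assumption:

from unittest import mock

def test_claim_resources_on_destination(self):
    reportclient = self.report_client  # assumed fixture on the TestCase

    # Stacked patches apply bottom-up, so the innermost decorator
    # ('claim_resources') binds to the first parameter, mock_claim.
    @mock.patch.object(reportclient, 'get_allocations_for_consumer')  # assumed
    @mock.patch.object(reportclient, 'claim_resources', return_value=True)
    def test(mock_claim, mock_get_allocs):
        ...  # bodies as shown in the examples on this page

    test()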
Example #3
 def _select_destination():
     if not self.destination:
         # Either no host was specified in the API request and the user
         # wants the scheduler to pick a destination host, or a host was
         # specified but is not forcing it, so they want the scheduler
         # filters to run on the specified host, like a scheduler hint.
         self.destination, self.sched_limits = self._find_destination()
     else:
         # This is the case that the user specified the 'force' flag
         # when live migrating with a specific destination host so the
         # scheduler is bypassed. There are still some minimal checks
         # performed here though.
         source_node, dest_node = self._check_requested_destination()
         # Now that we're semi-confident in the force specified host, we
         # need to copy the source compute node allocations in Placement
         # to the destination compute node.
         # Normally select_destinations()
         # in the scheduler would do this for us, but when forcing the
         # target host we don't call the scheduler.
         # TODO(mriedem): In Queens, call select_destinations() with a
         # skip_filters=True flag so the scheduler does the work of
         # claiming resources on the destination in Placement but still
         # bypass the scheduler filters, which honors the 'force' flag
         # in the API.
         # This raises NoValidHost which will be handled in
         # ComputeTaskManager.
         scheduler_utils.claim_resources_on_destination(
             self.scheduler_client.reportclient, self.instance,
             source_node, dest_node)
Example #4
 def test(mock_claim, mock_get_allocs):
     utils.claim_resources_on_destination(
         reportclient, instance, source_node, dest_node,
         source_res_allocs)
     self.assertFalse(mock_get_allocs.called)
     mock_claim.assert_called_once_with(
         instance.uuid, dest_alloc_request,
         instance.project_id, instance.user_id)
Example #5
 def test(mock_claim, mock_get_allocs):
     utils.claim_resources_on_destination(
         self.context, reportclient, instance, source_node, dest_node,
         source_res_allocs, consumer_generation=None)
     self.assertFalse(mock_get_allocs.called)
     mock_claim.assert_called_once_with(
         self.context, instance.uuid, dest_alloc_request,
         instance.project_id, instance.user_id,
         allocation_request_version='1.28', consumer_generation=None)
Example #6
 def test(mock_claim, mock_get_allocs):
     utils.claim_resources_on_destination(
         self.context, reportclient, instance, source_node, dest_node,
         source_res_allocs)
     self.assertFalse(mock_get_allocs.called)
     mock_claim.assert_called_once_with(
         self.context, instance.uuid, dest_alloc_request,
         instance.project_id, instance.user_id,
         allocation_request_version='1.12')
Example #7
    def _execute(self):
        self._check_instance_is_active()
        self._check_host_is_up(self.source)
        LOG.info(
            "Checking that the destination host's vCPUs are compatible "
            "with the instance")
        try:
            self._check_host_destination_vcpu_is_compatible()
        except Exception as error:
            raise exception.MigrationPreCheckError(reason=str(error))

        if should_do_migration_allocation(self.context):
            self._source_cn, self._held_allocations = (
                migrate.replace_allocation_with_migration(
                    self.context, self.instance, self.migration))

        if not self.destination:
            # Either no host was specified in the API request and the user
            # wants the scheduler to pick a destination host, or a host was
            # specified but is not forcing it, so they want the scheduler
            # filters to run on the specified host, like a scheduler hint.
            self.destination, dest_node = self._find_destination()
        else:
            # This is the case that the user specified the 'force' flag when
            # live migrating with a specific destination host so the scheduler
            # is bypassed. There are still some minimal checks performed here
            # though.
            source_node, dest_node = self._check_requested_destination()

            scheduler_utils.claim_resources_on_destination(
                self.context,
                self.scheduler_client.reportclient,
                self.instance,
                source_node,
                dest_node,
                source_node_allocations=self._held_allocations)

            # dest_node is a ComputeNode object, so we need to get the actual
            # node name off it to set in the Migration object below.
            dest_node = dest_node.hypervisor_hostname

        self.migration.source_node = self.instance.node
        self.migration.dest_node = dest_node
        self.migration.dest_compute = self.destination
        self.migration.save()

        # TODO(johngarbutt) need to move complexity out of compute manager
        # TODO(johngarbutt) disk_over_commit?
        return self.compute_rpcapi.live_migration(
            self.context,
            host=self.source,
            instance=self.instance,
            dest=self.destination,
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=self.migrate_data)
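
This variant adds a vCPU compatibility pre-check whose body the snippet does not show. A hypothetical sketch of what such a check might look like; every name below other than the method itself is an assumption, and the caller above wraps any failure into MigrationPreCheckError:

def _check_host_destination_vcpu_is_compatible(self):
    # Hypothetical body: the snippet only shows the call site. One
    # plausible check is that a requested destination host exposes at
    # least as many vCPUs as the instance's flavor needs.
    if not self.destination:
        # No destination requested; the scheduler will pick one later.
        return
    dest_node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
        self.context, self.destination)  # assumed lookup
    if self.instance.flavor.vcpus > dest_node.vcpus:
        raise ValueError(
            'Instance needs %d vCPUs but host %s only has %d' %
            (self.instance.flavor.vcpus, self.destination, dest_node.vcpus))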
Example #8
    def _execute(self):
        self._check_instance_is_active()
        self._check_host_is_up(self.source)

        if not self.destination:
            # Either no host was specified in the API request and the user
            # wants the scheduler to pick a destination host, or a host was
            # specified but is not forcing it, so they want the scheduler
            # filters to run on the specified host, like a scheduler hint.
            self.destination = self._find_destination()
            self.migration.dest_compute = self.destination
            self.migration.save()
        else:
            # This is the case that the user specified the 'force' flag when
            # live migrating with a specific destination host so the scheduler
            # is bypassed. There are still some minimal checks performed here
            # though.
            source_node, dest_node = self._check_requested_destination()
            # Now that we're semi-confident in the force specified host, we
            # need to copy the source compute node allocations in Placement
            # to the destination compute node. Normally select_destinations()
            # in the scheduler would do this for us, but when forcing the
            # target host we don't call the scheduler.
            # TODO(mriedem): In Queens, call select_destinations() with a
            # skip_filters=True flag so the scheduler does the work of claiming
            # resources on the destination in Placement but still bypass the
            # scheduler filters, which honors the 'force' flag in the API.
            # This raises NoValidHost which will be handled in
            # ComputeTaskManager.
            scheduler_utils.claim_resources_on_destination(
                self.scheduler_client.reportclient, self.instance, source_node,
                dest_node)

        # TODO(johngarbutt) need to move complexity out of compute manager
        # TODO(johngarbutt) disk_over_commit?
        return self.compute_rpcapi.live_migration(
            self.context,
            host=self.source,
            instance=self.instance,
            dest=self.destination,
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=self.migrate_data)
Example #9
    def _execute(self):
        self._check_instance_is_active()
        self._check_instance_has_no_numa()
        self._check_host_is_up(self.source)

        self._source_cn, self._held_allocations = (
            # NOTE(danms): This may raise various exceptions, which will
            # propagate to the API and cause a 500. This is what we
            # want, as it would indicate internal data structure corruption
            # (such as missing migrations, compute nodes, etc).
            migrate.replace_allocation_with_migration(self.context,
                                                      self.instance,
                                                      self.migration))

        if not self.destination:
            # Either no host was specified in the API request and the user
            # wants the scheduler to pick a destination host, or a host was
            # specified but is not forcing it, so they want the scheduler
            # filters to run on the specified host, like a scheduler hint.
            self.destination, dest_node, self.limits = self._find_destination()
        else:
            # This is the case that the user specified the 'force' flag when
            # live migrating with a specific destination host so the scheduler
            # is bypassed. There are still some minimal checks performed here
            # though.
            self._check_destination_is_not_source()
            self._check_host_is_up(self.destination)
            self._check_destination_has_enough_memory()
            source_node, dest_node = (
                self._check_compatible_with_source_hypervisor(
                    self.destination))
            # TODO(mriedem): Call select_destinations() with a
            # skip_filters=True flag so the scheduler does the work of claiming
            # resources on the destination in Placement but still bypass the
            # scheduler filters, which honors the 'force' flag in the API.
            # This raises NoValidHost which will be handled in
            # ComputeTaskManager.
            # NOTE(gibi): consumer_generation = None as we expect that the
            # source host allocation is held by the migration therefore the
            # instance is a new, empty consumer for the dest allocation. If
            # this assumption fails then placement will return consumer
            # generation conflict and this call raises an AllocationUpdateFailed
            # exception. We let that propagate here to abort the migration.
            # NOTE(luyao): When forcing the target host we don't call the
            # scheduler, that means we need to get allocations from placement
            # first, then claim resources in resource tracker on the
            # destination host based on these allocations.
            scheduler_utils.claim_resources_on_destination(
                self.context,
                self.report_client,
                self.instance,
                source_node,
                dest_node,
                source_allocations=self._held_allocations,
                consumer_generation=None)
            try:
                self._check_requested_destination()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self._remove_host_allocations(dest_node.uuid)

            # dest_node is a ComputeNode object, so we need to get the actual
            # node name off it to set in the Migration object below.
            dest_node = dest_node.hypervisor_hostname

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, self.destination))

        self.migration.source_node = self.instance.node
        self.migration.dest_node = dest_node
        self.migration.dest_compute = self.destination
        self.migration.save()

        # TODO(johngarbutt) need to move complexity out of compute manager
        # TODO(johngarbutt) disk_over_commit?
        return self.compute_rpcapi.live_migration(
            self.context,
            host=self.source,
            instance=self.instance,
            dest=self.destination,
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=self.migrate_data)
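
A detail worth noting in this variant: resources are claimed on the destination before _check_requested_destination() runs, so a failed pre-check has to undo the destination allocations. The shape of that pattern in isolation, with illustrative function names:

from oslo_utils import excutils

def claim_then_precheck(claim, precheck, rollback):
    # Claim first so the remaining pre-checks run against a host that is
    # already reserved, then roll the claim back if any pre-check fails,
    # re-raising the original exception for the caller to handle.
    claim()
    try:
        precheck()
    except Exception:
        with excutils.save_and_reraise_exception():
            rollback()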
Example #10
 def test(mock_claim, mock_get_allocs):
     utils.claim_resources_on_destination(
         self.context, reportclient, instance, source_node, dest_node)
     mock_get_allocs.assert_called_once_with(
         self.context, uuids.source_node, instance.uuid)
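
Taken together, the examples trace how the helper's signature evolved across releases. A hedged reconstruction from the call sites and assertions above; default values and parameter names not visible in the snippets are assumptions:

# Oldest form (Examples #3, #4, #8): no request context; source
# allocations are looked up from Placement when not supplied.
def claim_resources_on_destination(reportclient, instance, source_node,
                                   dest_node,
                                   source_node_allocations=None): ...

# A request context was added (Examples #1, #2, #6, #7, #10).
def claim_resources_on_destination(context, reportclient, instance,
                                   source_node, dest_node,
                                   source_node_allocations=None): ...

# Newest form (Examples #5, #9): the allocations kwarg is renamed
# source_allocations and consumer_generation is threaded through so
# Placement can detect consumer-generation conflicts.
def claim_resources_on_destination(context, reportclient, instance,
                                   source_node, dest_node,
                                   source_allocations=None,
                                   consumer_generation=None): ...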