Example #1
    def test_migrate_live_pre_check_error(self):
        self._test_migrate_live_failed_with_exception(
            exception.MigrationPreCheckError(reason=''))
Example #2
    def test_find_destination_retry_with_failed_mig_pre_checks_fail(self):
        self._test_find_destination_retry_livem_checks_fail(
            exception.MigrationPreCheckError("reason"))
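
Both tests above delegate to a shared helper that injects an exception into the live-migration pre-check path and asserts it propagates as the failure. A minimal sketch of that helper pattern, assuming a hypothetical _FakeTask stand-in (the real Nova test helpers and fixtures differ in detail):

import unittest
from unittest import mock

from nova import exception


class _FakeTask(object):
    """Hypothetical stand-in for the conductor's live-migration task."""

    def _check_source(self):
        pass

    def execute(self):
        self._check_source()


class LiveMigrateFailureTest(unittest.TestCase):

    def _test_migrate_live_failed_with_exception(self, exc):
        # Force the pre-check step to raise and verify the exception
        # propagates out of execute().
        task = _FakeTask()
        with mock.patch.object(task, '_check_source', side_effect=exc):
            self.assertRaises(type(exc), task.execute)

    def test_migrate_live_pre_check_error(self):
        self._test_migrate_live_failed_with_exception(
            exception.MigrationPreCheckError(reason=''))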
Example #3
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name, state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Source migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt(
            lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data
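
The helper _verify_migration_capacity called above is not shown in this example. Purely as an illustration (not the actual nova-powervm implementation), a source-side capacity check of this kind would compare the host's count of in-flight migrations against its supported maximum and raise MigrationPreCheckError when the limit is reached; the migration_data keys below are assumptions:

from nova import exception
from nova.i18n import _


def _verify_migration_capacity(host_w, instance):
    """Hypothetical sketch of a host migration-capacity pre-check."""
    mig_stats = host_w.migration_data
    if (mig_stats['active_migrations_in_progress'] >=
            mig_stats['active_migrations_supported']):
        msg = (_("Cannot migrate %(name)s because host %(host)s is "
                 "already running its maximum of %(limit)d concurrent "
                 "migrations.") %
               dict(name=instance.name, host=host_w.system_name,
                    limit=mig_stats['active_migrations_supported']))
        raise exception.MigrationPreCheckError(reason=msg)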
Example #4
    def _claim_resources_on_destination(self, source_node, dest_node):
        """Copies allocations from source node to dest node in Placement

        :param source_node: source ComputeNode where the instance currently
                            lives
        :param dest_node: destination ComputeNode where the instance is being
                          forced to live migrate.
        """
        reportclient = self.scheduler_client.reportclient
        # Get the current allocations for the source node and the instance.
        source_node_allocations = reportclient.get_allocations_for_instance(
            source_node.uuid, self.instance)
        if source_node_allocations:
            # Generate an allocation request for the destination node.
            alloc_request = {
                'allocations': [{
                    'resource_provider': {
                        'uuid': dest_node.uuid
                    },
                    'resources': source_node_allocations
                }]
            }
            # The claim_resources method will check for existing allocations
            # for the instance and effectively "double up" the allocations for
            # both the source and destination node. That's why when requesting
            # allocations for resources on the destination node before we live
            # migrate, we use the existing resource allocations from the
            # source node.
            if reportclient.claim_resources(self.instance.uuid, alloc_request,
                                            self.instance.project_id,
                                            self.instance.user_id):
                LOG.debug(
                    'Instance allocations successfully created on '
                    'destination node %(dest)s: %(alloc_request)s', {
                        'dest': dest_node.uuid,
                        'alloc_request': alloc_request
                    },
                    instance=self.instance)
            else:
                # We have to fail even though the user requested that we force
                # the host. This is because we need Placement to have an
                # accurate reflection of what's allocated on all nodes so the
                # scheduler can make accurate decisions about which nodes have
                # capacity for building an instance. We also cannot rely on the
                # resource tracker in the compute service automatically healing
                # the allocations since that code is going away in Queens.
                reason = (_('Unable to migrate instance %(instance_uuid)s to '
                            'host %(host)s. There is not enough capacity on '
                            'the host for the instance.') % {
                                'instance_uuid': self.instance.uuid,
                                'host': self.destination
                            })
                raise exception.MigrationPreCheckError(reason=reason)
        else:
            # This shouldn't happen, but it could be a case where there are
            # older (Ocata) computes still so the existing allocations are
            # getting overwritten by the update_available_resource periodic
            # task in the compute service.
            # TODO(mriedem): Make this an error when the auto-heal
            # compatibility code in the resource tracker is removed.
            LOG.warning(
                'No instance allocations found for source node '
                '%(source)s in Placement. Not creating allocations '
                'for destination node %(dest)s and assuming the '
                'compute service will heal the allocations.', {
                    'source': source_node.uuid,
                    'dest': dest_node.uuid
                },
                instance=self.instance)
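
For context, get_allocations_for_instance returns a flat mapping of resource class to amount, and the method above simply re-targets that mapping at the destination node's resource provider before calling claim_resources. A small illustration of the data shapes involved (the values are made up):

# Illustrative only: typical shape of the source node's allocations.
source_node_allocations = {
    'VCPU': 2,
    'MEMORY_MB': 4096,
    'DISK_GB': 20,
}

# Re-targeted at the destination compute node's resource provider UUID.
alloc_request = {
    'allocations': [{
        'resource_provider': {'uuid': '<dest-node-uuid>'},
        'resources': source_node_allocations,
    }]
}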