    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a dict containing migration info
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance,
                                         self.drvr.host_uuid)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s' % self.dest_data)

        # Only 'migrate_data' is sent to the destination on prelive call.
        mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)}
        self.src_data['migrate_data'] = mig_data
        LOG.debug('Src Migration data: %s' % self.src_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.dest_data['dest_proc_compat'].split(',')):
            raise LiveMigrationProcCompat(
                name=self.instance.name,
                mode=lpar_w.proc_compat_mode,
                modes=', '.join(self.dest_data['dest_proc_compat'].split(',')))

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            raise LiveMigrationInvalidState(name=self.instance.name,
                                            state=lpar_w.migration_state)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        # Gather the 'source' pre-migration data from the volume drivers.
        # Each driver updates the mig_data dictionary in place as needed.
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(mig_data)

        # Remove the VOpt devices
        LOG.debug('Removing VOpt.', instance=self.instance)
        media.ConfigDrivePowerVM(self.drvr.adapter,
                                 self.drvr.host_uuid).dlt_vopt(lpar_w.uuid)
        LOG.debug('Removing VOpt finished.', instance=self.instance)

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.src_data
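check_source() above is only one phase of the source-side flow. For context, the hedged sketch below shows how a PowerVM compute driver's check_can_live_migrate_source() hook might build the migration helper and invoke it; the LiveMigrationSrc name, the live_migrations cache, and the _build_vol_drivers() helper are illustrative assumptions rather than code taken from these examples.

    def check_can_live_migrate_source(self, context, instance, dest_check_data,
                                      block_device_info=None):
        # Hypothetical sketch: build the per-instance source-side migration
        # helper and cache it so the later phases of the migration can reuse
        # it.  LiveMigrationSrc and live_migrations are assumed names.
        mig = LiveMigrationSrc(self, instance, dest_check_data)
        self.live_migrations[instance.uuid] = mig

        # Assumed helper that builds the volume drivers for the attached
        # volumes from the block device info.
        vol_drvs = self._build_vol_drivers(context, instance,
                                           block_device_info)

        # Run the source-side checks; the data returned here is handed back
        # to the conductor and eventually reaches the destination host.
        return mig.check_source(context, block_device_info, vol_drvs)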
Example #2
    def test_get_public_key(self):
        self.cons_w.ssh_public_key = '1234554321'
        key = mc_task.get_public_key(mock.Mock())
        self.assertEqual('1234554321', key)
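The test above stubs the management console wrapper and expects get_public_key() to return its ssh_public_key unchanged. A minimal sketch of such an implementation is shown below, assuming a pypowervm ManagementConsole wrapper whose feed holds a single console entry; the import path and the get() call are assumptions, not taken from the test.

from pypowervm.wrappers import management_console as pvm_mc


def get_public_key(adapter):
    """Return the SSH public key of the management console.

    :param adapter: The pypowervm Adapter for the PowerVM REST API.
    :return: The management console's SSH public key string.
    """
    # Sketch: read the (assumed single-entry) management console feed and
    # return the key from the first entry.
    console_w = pvm_mc.ManagementConsole.get(adapter)[0]
    return console_w.ssh_public_key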
Example #3
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s' % self.mig_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
                self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name,
                        state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Src Migration data: %s' % self.mig_data)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter, self.drvr.host_uuid
                                 ).dlt_vopt(lpar_w.uuid, stg_ftsk=stg_ftsk,
                                            remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data
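Each variant of check_source() also calls a module-level _verify_migration_capacity() helper before collecting the volume data. A rough sketch of what such a capacity check could look like follows, assuming the host wrapper exposes a migration_data dictionary with 'active_migrations_supported' and 'active_migrations_in_progress' entries; those key names, and the reuse of MigrationPreCheckError, are assumptions made for illustration.

def _verify_migration_capacity(host_w, instance):
    """Sketch: fail fast if the host is already at its migration limit."""
    # migration_data and its keys are assumed attributes of the host wrapper.
    mig_stats = host_w.migration_data
    if (mig_stats['active_migrations_in_progress'] >=
            mig_stats['active_migrations_supported']):
        msg = (_("Cannot migrate %(name)s because the host is already "
                 "running its maximum of %(allowed)s concurrent migrations.")
               % dict(name=instance.name,
                      allowed=mig_stats['active_migrations_supported']))
        raise exception.MigrationPreCheckError(reason=msg)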
Example #4
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name, state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Source migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt(
            lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data