Example #1
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function.

        Includes catching up with currently running VMs on the given host.
        """
        # Build the adapter. May need to attempt the connection multiple times
        # in case the PowerVM management API service is starting.
        # TODO(efried): Implement async compute service enable/disable like
        # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
        self.adapter = pvm_apt.Adapter(
            pvm_apt.Session(conn_tries=60),
            helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
        # Make sure the Virtual I/O Server(s) are available.
        pvm_par.validate_vios_ready(self.adapter)
        self.host_wrapper = pvm_ms.System.get(self.adapter)[0]

        # Do a scrub of the I/O plane to make sure the system is in good shape
        LOG.info("Clearing stale I/O connections on driver init.")
        pvm_stor.ComprehensiveScrub(self.adapter).execute()

        # Initialize the disk adapter
        # TODO(efried): Other disk adapters (localdisk), by conf selection.
        self.disk_dvr = ssp.SSPDiskAdapter(self.adapter,
                                           self.host_wrapper.uuid)
        self.image_api = image.API()

        LOG.info("The PowerVM compute driver has been initialized.")
Example #2
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function.

        Includes catching up with currently running VMs on the given host.
        """
        LOG.warning(
            'The powervm virt driver is deprecated and may be removed in a '
            'future release. The driver is not tested by the OpenStack '
            'project nor does it have clear maintainers and thus its quality '
            'cannot be ensured. If you are using the driver in production, '
            'please let us know on the openstack-discuss mailing list or on '
            'IRC.'
        )

        # Build the adapter. May need to attempt the connection multiple times
        # in case the PowerVM management API service is starting.
        # TODO(efried): Implement async compute service enable/disable like
        # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
        self.adapter = pvm_apt.Adapter(
            pvm_apt.Session(conn_tries=60),
            helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
        # Make sure the Virtual I/O Server(s) are available.
        pvm_par.validate_vios_ready(self.adapter)
        self.host_wrapper = pvm_ms.System.get(self.adapter)[0]

        # Do a scrub of the I/O plane to make sure the system is in good shape
        LOG.info("Clearing stale I/O connections on driver init.")
        pvm_stor.ComprehensiveScrub(self.adapter).execute()

        # Initialize the disk adapter
        self.disk_dvr = importutils.import_object_ns(
            DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
            self.adapter, self.host_wrapper.uuid)
        self.image_api = glance.API()

        LOG.info("The PowerVM compute driver has been initialized.")
Example #3
    def pre_live_migration(self, context, block_device_info, network_infos,
                           disk_info, migrate_data, vol_drvs):
        """Prepare an instance for live migration

        :param context: security context
        :param block_device_info: instance block device information
        :param network_infos: instance network information
        :param disk_info: instance disk information
        :param migrate_data: a PowerVMLiveMigrateData object
        :param vol_drvs: volume drivers for the attached volumes
        """
        LOG.debug(
            'Running pre live migration on destination. Migration data: '
            '%s',
            migrate_data,
            instance=self.instance)

        # Set the ssh auth key.
        mgmt_task.add_authorized_key(self.drvr.adapter,
                                     migrate_data.public_key)

        # For each network info, run the pre-live migration.  This tells the
        # system what the target VLANs will be.
        vea_vlan_mappings = {}
        for network_info in network_infos:
            vif.pre_live_migrate_at_destination(self.drvr.adapter,
                                                self.drvr.host_uuid,
                                                self.instance, network_info,
                                                vea_vlan_mappings)
        migrate_data.vea_vlan_mappings = vea_vlan_mappings

        # For each volume, make sure it's ready to migrate
        for vol_drv in vol_drvs:
            LOG.info('Performing pre migration for volume %(volume)s',
                     dict(volume=vol_drv.volume_id),
                     instance=self.instance)
            try:
                vol_drv.pre_live_migration_on_destination(
                    migrate_data.vol_data)
            except Exception:
                LOG.exception(
                    "PowerVM error preparing instance for live "
                    "migration.",
                    instance=self.instance)
                # It failed.
                vol_exc = LiveMigrationVolume(
                    host=self.drvr.host_wrapper.system_name,
                    name=self.instance.name,
                    volume=vol_drv.volume_id)
                raise exception.MigrationPreCheckError(reason=vol_exc.message)

        # Scrub stale/orphan mappings and storage to minimize probability of
        # collisions on the destination.
        stor_task.ComprehensiveScrub(self.drvr.adapter).execute()

        # Save the migration data; we'll use it if the LPM fails.
        self.pre_live_vol_data = migrate_data.vol_data
        return migrate_data
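The per-volume loop above follows a pattern worth isolating: any failure
during volume pre-checks is logged with instance context and re-raised as
MigrationPreCheckError, which tells the conductor to abort the migration
before any state changes on the source. A hedged sketch of that pattern on
its own; the helper name and message wording are illustrative:

    from nova import exception
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def _pre_check_volumes(vol_drvs, vol_data, instance, host_name):
        """Hypothetical helper distilling the per-volume pre-check loop."""
        for vol_drv in vol_drvs:
            try:
                vol_drv.pre_live_migration_on_destination(vol_data)
            except Exception:
                LOG.exception('Pre live migration failed for volume '
                              '%(volume)s', {'volume': vol_drv.volume_id},
                              instance=instance)
                # Surface as a pre-check failure so the migration aborts
                # cleanly and the scheduler can try another host.
                raise exception.MigrationPreCheckError(
                    reason='Volume %s is not ready to migrate to %s' %
                           (vol_drv.volume_id, host_name))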
Example #4
    def pre_live_migration(self, context, block_device_info, network_info,
                           disk_info, migrate_data, vol_drvs):
        """Prepare an instance for live migration

        :param context: security context
        :param block_device_info: instance block device information
        :param network_info: instance network information
        :param disk_info: instance disk information
        :param migrate_data: implementation specific data dict
        :param vol_drvs: volume drivers for the attached volumes
        """
        LOG.debug('Running pre live migration on destination.',
                  instance=self.instance)
        LOG.debug('Migration data: %s', migrate_data)

        # Set the ssh auth key if needed.
        src_mig_data = migrate_data.get('migrate_data', {})
        pub_key = src_mig_data.get('public_key')
        if pub_key is not None:
            mgmt_task.add_authorized_key(self.drvr.adapter, pub_key)

        # For each volume, make sure it's ready to migrate
        dest_mig_data = {}
        for vol_drv in vol_drvs:
            LOG.info(_LI('Performing pre migration for volume %(volume)s'),
                     dict(volume=vol_drv.volume_id))
            try:
                vol_drv.pre_live_migration_on_destination(
                    src_mig_data, dest_mig_data)
            except Exception:
                LOG.exception('Pre live migration failed for volume '
                              '%(volume)s', dict(volume=vol_drv.volume_id))
                # It failed.
                vol_exc = LiveMigrationVolume(
                    host=self.drvr.host_wrapper.system_name,
                    name=self.instance.name,
                    volume=vol_drv.volume_id)
                raise exception.MigrationPreCheckError(reason=vol_exc.message)

        # Scrub stale/orphan mappings and storage to minimize probability of
        # collisions on the destination.
        stor_task.ComprehensiveScrub(self.drvr.adapter).execute()

        # Save the migration data; we'll use it if the LPM fails.
        self.pre_live_data = dest_mig_data
        return dest_mig_data
Example #5
    def test_comprehensive_scrub(self, mock_wrap, mock_stale_lids,
                                 mock_rm_vopts, mock_rm_vdisks):
        # Don't confuse the 'update' call count with the VG POST
        mock_rm_vopts.return_value = None
        mock_rm_vdisks.return_value = None
        # Three "stale" LPARs in addition to the orphans.  These LPAR IDs are
        # represented in both VSCSI and VFC mappings.
        mock_stale_lids.return_value = [15, 18, 22]
        # Make sure all our "stale" lpars hit.
        mock_wrap.return_value = []
        vwrap = self.vio_feed[0]
        # Save the "before" sizes of the mapping lists
        vscsi_len = len(vwrap.scsi_mappings)
        vfc_len = len(vwrap.vfc_mappings)
        ts.ComprehensiveScrub(self.adpt).execute()
        # The right number of maps remain.
        self.assertEqual(vscsi_len - 21, len(vwrap.scsi_mappings))
        self.assertEqual(vfc_len - 22, len(vwrap.vfc_mappings))
        self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
        self.assertEqual(1, mock_rm_vopts.call_count)
        self.assertEqual(1, mock_rm_vdisks.call_count)
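The mock arguments in Example #5 are injected by stacked mock.patch
decorators, which apply bottom-up: the bottom-most decorator supplies the
first argument after self. A sketch of a decorator stack that would produce
this signature; the patch targets are hypothetical, not pypowervm's real
internals:

    from unittest import mock

    # Bottom decorator -> first mock arg after self; top -> last.
    @mock.patch('pypowervm.tasks.storage._rm_vdisks')        # hypothetical
    @mock.patch('pypowervm.tasks.storage._rm_vopts')         # hypothetical
    @mock.patch('pypowervm.tasks.storage.find_stale_lpars')  # hypothetical
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')  # hypothetical
    def test_comprehensive_scrub(self, mock_wrap, mock_stale_lids,
                                 mock_rm_vopts, mock_rm_vdisks):
        ...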
Example #6
    def init_recreate_map(self, adapter, vol_drv_iter):
        """To be used on a target system.  Builds the 'slot recreate' map.

        This initializes, on the target system, how the client slots should
        be rebuilt on the client VM.

        This should not be called unless it is a VM recreate.

        :param adapter: The pypowervm adapter.
        :param vol_drv_iter: An iterator of the volume drivers.
        """
        # This should only be called on a rebuild. Focus on being correct
        # first. Performance is secondary.

        # We need to scrub existing stale mappings, including those for the VM
        # we're creating.  It is critical that this happen *before* we create
        # any of the mappings we actually want this VM to have.
        scrub_ftsk = pvm_tstor.ComprehensiveScrub(adapter)
        scrub_ftsk.execute()
        self._vios_wraps = scrub_ftsk.feed

        pv_vscsi_vol_to_vio = {}
        fabric_names = []
        for bdm, vol_drv in vol_drv_iter:
            if vol_drv.vol_type() in ['vscsi', 'fileio', 'rbd']:
                self._pv_vscsi_vol_to_vio(vol_drv, pv_vscsi_vol_to_vio)
            elif len(fabric_names) == 0 and vol_drv.vol_type() == 'npiv':
                fabric_names = vol_drv._fabric_names()

        # Run the full initialization now that we have the pre-requisite data
        try:
            self._build_map = slot_map.RebuildSlotMap(self, self._vios_wraps,
                                                      pv_vscsi_vol_to_vio,
                                                      fabric_names)
        except pvm_exc.InvalidHostForRebuild as e:
            raise p_exc.InvalidRebuild(error=six.text_type(e))
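For context, a hedged sketch of where init_recreate_map sits in a rebuild
flow. Only init_recreate_map is from the example; the surrounding names are
hypothetical:

    def rebuild_on_target(slot_mgr, adapter, vol_drv_iter):
        """Hypothetical caller illustrating the required ordering."""
        # Must run before any new client mappings are created: the scrub
        # inside init_recreate_map removes stale mappings (including this
        # VM's), so mappings made earlier would be destroyed.
        slot_mgr.init_recreate_map(adapter, vol_drv_iter)
        # ... now attach volumes/VIFs, consulting slot_mgr for the client
        # slot each device should be rebuilt into.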