def test_add_auth_key(self):
    # Test adding a key ('4') to an existing list ('1', '2', '3')
    self.cons_w.ssh_authorized_keys = tuple(['1', '2', '3'])
    mc_task.add_authorized_key(mock.Mock(), '4')
    self.assertEqual(['1', '2', '3', '4'],
                     self.cons_w.ssh_authorized_keys)
    self.cons_w.update.assert_called_once_with()

    # Test we don't call update when not needed.
    self.cons_w.reset_mock()
    mc_task.add_authorized_key(mock.Mock(), '2')
    self.assertEqual(0, self.cons_w.update.called)

    # Test the transaction retry
    self.cons_w.reset_mock()
    resp = mock.Mock(status=const.HTTPStatus.ETAG_MISMATCH)
    self.cons_w.update.side_effect = exc.HttpError(resp)
    # When the transaction decorator refreshes the mgmt console wrapper
    # then we know it's retrying so just raise an exception and bail
    self.cons_w.refresh.side_effect = ValueError()
    self.assertRaises(ValueError, mc_task.add_authorized_key,
                      mock.Mock(), '5')
    # Ensure it really was refresh that caused the exception
    self.assertEqual(1, self.cons_w.refresh.call_count)
    # And that our update was called
    self.assertEqual(1, self.cons_w.update.call_count)
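
# ---------------------------------------------------------------------------
# Illustrative sketch only (not the real mc_task implementation): the test
# above exercises behavior roughly like the following.  In the real code the
# refresh-and-retry loop is handled by the transaction decorator; the names
# EtagMismatchError and MAX_RETRIES below are hypothetical stand-ins.
# ---------------------------------------------------------------------------
class EtagMismatchError(Exception):
    """Hypothetical stand-in for an HTTP 412 / etag-mismatch failure."""


MAX_RETRIES = 3


def add_authorized_key_sketch(cons_w, key):
    """Add ``key`` to the console wrapper, retrying on etag mismatch."""
    for _ in range(MAX_RETRIES):
        keys = list(cons_w.ssh_authorized_keys)
        if key in keys:
            # The key is already present; skip the update entirely
            # (the second test case asserts update() is not called).
            return
        cons_w.ssh_authorized_keys = keys + [key]
        try:
            cons_w.update()
            return
        except EtagMismatchError:
            # Another writer changed the object in the meantime; refresh
            # and retry (the third test case forces refresh() to raise a
            # ValueError to prove this retry path was taken).
            cons_w.refresh()
    raise RuntimeError('Unable to update authorized keys after retries.')
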
def pre_live_migration(self, context, block_device_info, network_info,
                       disk_info, migrate_data, vol_drvs):
    """Prepare an instance for live migration

    :param context: security context
    :param block_device_info: instance block device information
    :param network_info: instance network information
    :param disk_info: instance disk information
    :param migrate_data: implementation specific data dict
    :param vol_drvs: volume drivers for the attached volumes
    """
    LOG.debug('Running pre live migration on destination.',
              instance=self.instance)
    LOG.debug('Migration data: %s' % migrate_data)

    # Set the ssh auth key if needed.
    src_mig_data = migrate_data.get('migrate_data', {})
    pub_key = src_mig_data.get('public_key')
    if pub_key is not None:
        mgmt_task.add_authorized_key(self.drvr.adapter, pub_key)

    # For each volume, make sure it's ready to migrate
    dest_mig_data = {}
    for vol_drv in vol_drvs:
        LOG.info(_LI('Performing pre migration for volume %(volume)s'),
                 dict(volume=vol_drv.volume_id))
        try:
            vol_drv.pre_live_migration_on_destination(src_mig_data,
                                                      dest_mig_data)
        except Exception as e:
            LOG.exception(e)
            # It failed.
            vol_exc = LiveMigrationVolume(
                host=self.drvr.host_wrapper.system_name,
                name=self.instance.name, volume=vol_drv.volume_id)
            raise exception.MigrationPreCheckError(reason=vol_exc.message)

    # Scrub stale/orphan mappings and storage to minimize probability of
    # collisions on the destination.
    stor_task.ComprehensiveScrub(self.drvr.adapter).execute()

    # Save the migration data, we'll use it if the LPM fails
    self.pre_live_data = dest_mig_data
    return dest_mig_data
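
# ---------------------------------------------------------------------------
# Illustrative sketch only: an assumed shape for the ``migrate_data`` dict
# consumed above.  The nested 'migrate_data'/'public_key' layout is taken
# from the lookups in pre_live_migration(); the key material itself is a
# made-up placeholder.
# ---------------------------------------------------------------------------
EXAMPLE_MIGRATE_DATA = {
    'migrate_data': {
        # Public SSH key published by the source host so the destination
        # can authorize it on its management partition.
        'public_key': 'ssh-rsa AAAAB3Nza... nova@source-host',
    },
}
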