def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS,
                   incremental_runner=INCREMENTAL_RUNNER):
    """Run a backup and stream it to the configured storage strategy.

    If ``backup_info`` references a parent backup, switch to the
    incremental runner and seed it with the parent's stored metadata.
    """
    LOG.debug("Running backup %(id)s.", backup_info)
    storage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)

    # Metadata describing the parent backup; empty for a full backup.
    metadata = {}
    parent_info = backup_info.get('parent')
    if parent_info:
        runner = incremental_runner
        LOG.debug("Using incremental backup runner: %s.", runner.__name__)
        metadata = storage.load_metadata(parent_info['location'],
                                         parent_info['checksum'])
        # The parent may itself be incremental, so point the new backup
        # at *this* parent's location and checksum.
        metadata['parent_location'] = parent_info['location']
        metadata['parent_checksum'] = parent_info['checksum']

    self.stream_backup_to_storage(backup_info, runner, storage,
                                  metadata, extra_opts)
def execute_restore(self, context, backup_info, restore_location):
    """Download a backup stream from storage and restore it in place.

    :param backup_info: dict with at least 'id', 'type', 'location' and
                        'checksum' keys describing the backup.
    :param restore_location: filesystem path to restore into.
    :raises: re-raises any failure after logging it.
    """
    try:
        LOG.debug("Getting Restore Runner of type %s", backup_info['type'])
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy")
        storage_strategy = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        LOG.debug("Preparing storage to download stream.")
        download_stream = storage_strategy.load(context,
                                                backup_info['location'],
                                                restore_runner.is_zipped,
                                                backup_info['checksum'])
        with restore_runner(restore_stream=download_stream,
                            restore_location=restore_location) as runner:
            LOG.debug("Restoring instance from backup %s to %s",
                      backup_info['id'], restore_location)
            content_size = runner.restore()
            LOG.info("Restore from backup %s completed successfully to %s",
                     backup_info['id'], restore_location)
            LOG.info("Restore size: %s", content_size)
    except Exception:
        # LOG.exception preserves the traceback; the previous
        # LOG.error(e) logged only the message and lost the stack.
        LOG.exception("Error restoring backup %s", backup_info['id'])
        raise
    else:
        LOG.info("Restored Backup %s", backup_info['id'])
def execute_restore(self, context, backup_id, restore_location):
    """Clean the restore location, then restore backup ``backup_id`` there.

    Looks the backup record up in the DB to determine the runner type,
    streams the backup down from storage, and re-owns the restored files
    to the mysql user.
    """
    try:
        LOG.debug("Cleaning out restore location: %s", restore_location)
        # NOTE(review): 0777 makes the directory world-writable; presumably
        # needed so the restore runner can write as another user — confirm.
        utils.execute_with_timeout("sudo", "chmod", "-R",
                                   "0777", restore_location)
        utils.clean_out(restore_location)
        LOG.debug("Finding backup %s to restore", backup_id)
        backup = DBBackup.find_by(id=backup_id)
        LOG.debug("Getting Restore Runner of type %s", backup.backup_type)
        restore_runner = self._get_restore_runner(backup.backup_type)
        LOG.debug("Getting Storage Strategy")
        storage_strategy = get_storage_strategy(CONF.storage_strategy,
                                                CONF.storage_namespace)(
                                                    context)
        LOG.debug("Preparing storage to download stream.")
        download_stream = storage_strategy.load(context,
                                                backup.location,
                                                restore_runner.is_zipped,
                                                backup.checksum)
        with restore_runner(restore_stream=download_stream,
                            restore_location=restore_location) as runner:
            LOG.debug("Restoring instance from backup %s to %s",
                      backup_id, restore_location)
            content_size = runner.restore()
            LOG.info("Restore from backup %s completed successfully to %s",
                     backup_id, restore_location)
            LOG.info("Restore size: %s", content_size)
            utils.execute_with_timeout("sudo", "chown", "-R",
                                       "mysql", restore_location)
    except Exception:
        # LOG.exception keeps the traceback; LOG.error(e) dropped it.
        LOG.exception("Error restoring backup %s", backup_id)
        raise
    else:
        LOG.info("Restored Backup %s", backup_id)
def execute_restore(self, context, backup_info, restore_location):
    """Restore an instance from the backup described by ``backup_info``.

    :param backup_info: dict with 'id', 'type', 'location', 'checksum';
                        'restore_location' is added to it here for logging.
    :param restore_location: filesystem path to restore into.
    :raises: re-raises any failure after logging it.
    """
    try:
        LOG.debug("Getting Restore Runner %(type)s", backup_info)
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy")
        storage = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        runner = restore_runner(storage, location=backup_info['location'],
                                checksum=backup_info['checksum'],
                                restore_location=restore_location)
        backup_info['restore_location'] = restore_location
        # Pass the mapping as a lazy argument instead of eager "%":
        # the message is only rendered if the level is enabled, and a
        # stray '%' in the values can no longer raise.
        LOG.debug("Restoring instance from backup %(id)s to "
                  "%(restore_location)s", backup_info)
        content_size = runner.restore()
        LOG.info(_("Restore from backup %(id)s completed successfully "
                   "to %(restore_location)s"), backup_info)
        LOG.info(_("Restore size: %s"), content_size)
    except Exception:
        # LOG.exception preserves the traceback that LOG.error(e) lost.
        LOG.exception(_("Error restoring backup %(id)s"), backup_info)
        raise
    else:
        LOG.info(_("Restored Backup %(id)s"), backup_info)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS,
                   incremental_runner=INCREMENTAL_RUNNER):
    """Stream a backup (full or incremental) to the storage strategy."""
    LOG.debug("Running backup %(id)s.", backup_info)
    strategy_cls = get_storage_strategy(CONF.storage_strategy,
                                        CONF.storage_namespace)
    storage = strategy_cls(context)

    # Parent metadata is only populated for incremental backups.
    meta = {}
    if backup_info.get('parent'):
        runner = incremental_runner
        LOG.debug("Using incremental backup runner: %s.", runner.__name__)
        parent_ref = backup_info['parent']
        meta = storage.load_metadata(parent_ref['location'],
                                     parent_ref['checksum'])
        # The parent could itself be incremental, so rebase the
        # location/checksum onto *this* parent's values.
        meta['parent_location'] = parent_ref['location']
        meta['parent_checksum'] = parent_ref['checksum']

    self.stream_backup_to_storage(context, backup_info, runner, storage,
                                  meta, extra_opts)
def execute_restore(self, context, backup_info, restore_location):
    """Download a backup stream from storage and restore it in place.

    :param backup_info: dict with at least 'id', 'type', 'location' and
                        'checksum' keys describing the backup.
    :param restore_location: filesystem path to restore into.
    :raises: re-raises any failure after logging it.
    """
    try:
        LOG.debug("Getting Restore Runner of type %s", backup_info['type'])
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy")
        storage_strategy = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        LOG.debug("Preparing storage to download stream.")
        download_stream = storage_strategy.load(context,
                                                backup_info['location'],
                                                restore_runner.is_zipped,
                                                backup_info['checksum'])
        with restore_runner(restore_stream=download_stream,
                            restore_location=restore_location) as runner:
            LOG.debug("Restoring instance from backup %s to %s",
                      backup_info['id'], restore_location)
            content_size = runner.restore()
            LOG.info("Restore from backup %s completed successfully to %s",
                     backup_info['id'], restore_location)
            LOG.info("Restore size: %s", content_size)
    except Exception:
        # LOG.exception preserves the traceback; the previous
        # LOG.error(e) logged only the message and lost the stack.
        LOG.exception("Error restoring backup %s", backup_info['id'])
        raise
    else:
        LOG.info("Restored Backup %s", backup_info['id'])
def execute_restore(self, context, backup_info, restore_location):
    """Restore an instance from the backup described by ``backup_info``.

    :param backup_info: dict with 'id', 'type', 'location', 'checksum';
                        'restore_location' is added to it here for logging.
    :param restore_location: filesystem path to restore into.
    :raises: re-raises any failure after logging it.
    """
    try:
        LOG.debug("Getting Restore Runner %(type)s.", backup_info)
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy.")
        storage = get_storage_strategy(CONF.storage_strategy,
                                       CONF.storage_namespace)(context)
        runner = restore_runner(storage, location=backup_info['location'],
                                checksum=backup_info['checksum'],
                                restore_location=restore_location)
        backup_info['restore_location'] = restore_location
        LOG.debug(
            "Restoring instance from backup %(id)s to "
            "%(restore_location)s.", backup_info)
        content_size = runner.restore()
        LOG.debug(
            "Restore from backup %(id)s completed successfully "
            "to %(restore_location)s.", backup_info)
        LOG.debug("Restore size: %s.", content_size)
    except Exception:
        # Lazy args instead of eager "%": formatting only happens when
        # the record is emitted, and a '%' in the values cannot raise.
        LOG.exception(_("Error restoring backup %(id)s."), backup_info)
        raise
    else:
        LOG.debug("Restored backup %(id)s.", backup_info)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to swift, and report state via conductor.

    Progress (BUILDING / FAILED / COMPLETED) is reported to the conductor
    under CONF.guest_id using an admin proxy context.
    """
    LOG.debug("Searching for backup instance %s", backup_info['id'])
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)

    LOG.info("Running backup %s", backup_info['id'])
    user = ADMIN_USER_NAME
    password = get_auth_password()
    # PEP 8: snake_case for local names (was swiftStorage).
    swift_storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)

    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    conductor.update_backup(CONF.guest_id,
                            backup_id=backup_info['id'],
                            size=stats.get('used', 0.0),
                            state=BackupState.BUILDING)

    with runner(filename=backup_info['id'], extra_opts=extra_opts,
                user=user, password=password) as bkup:
        try:
            LOG.info("Starting Backup %s", backup_info['id'])
            success, note, checksum, location = swift_storage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s",
                     backup_info['id'], success)
            LOG.info("Backup %s file size: %s",
                     backup_info['id'], bkup.content_length)
            LOG.info('Backup %s file swift checksum: %s',
                     backup_info['id'], checksum)
            LOG.info('Backup %s location: %s', backup_info['id'], location)
            if not success:
                raise BackupError(note)
        except Exception:
            # LOG.exception keeps the traceback; LOG.error(e) dropped it.
            LOG.exception("Error saving %s Backup", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    state=BackupState.FAILED)
            raise
        else:
            LOG.info("Saving %s Backup Info to model", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    checksum=checksum,
                                    location=location,
                                    note=note,
                                    backup_type=bkup.backup_type,
                                    state=BackupState.COMPLETED)
def snapshot_for_replication(self, context, service, location,
                             snapshot_info):
    """Stream a replication snapshot to storage.

    Returns (snapshot_id, log_position); the log position is empty
    because, with streamed InnobackupEx, it travels inside the stream
    and is decoded by the slave.
    """
    storage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)
    AGENT.stream_backup_to_storage(snapshot_info, REPL_BACKUP_RUNNER,
                                   storage, {}, REPL_EXTRA_OPTS)
    return snapshot_info["id"], {}
def execute_backup(self, context, backup_id, runner=RUNNER):
    """Run backup ``backup_id``, stream it to swift, track state in the DB.

    The DBBackup record is moved through NEW -> BUILDING -> COMPLETED
    (or FAILED) and updated with the resulting checksum and location.
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    backup = DBBackup.find_by(id=backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.NEW, backup.instance_id)
    backup.state = BackupState.NEW
    backup.save()

    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)

    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    backup.size = stats.get('used', 0.0)
    backup.state = BackupState.BUILDING
    backup.save()

    try:
        with runner(filename=backup_id, user=user, password=password)\
                as bkup:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s",
                     backup_id, bkup.content_length)
            LOG.info('Backup %s file checksum: %s', backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                # BUG FIX: raise with the note returned by storage.save();
                # backup.note is only assigned on the success path below,
                # so the old BackupError(backup.note) raised with a stale
                # (unset) value.
                raise BackupError(note)
    except Exception:
        # LOG.exception keeps the traceback; LOG.error(e) dropped it.
        LOG.exception("Error saving %s Backup", backup_id)
        backup.state = BackupState.FAILED
        backup.save()
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.checksum = checksum
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.save()
def execute_backup(self, context, backup_id, runner=RUNNER):
    """Run backup ``backup_id``, stream it to swift, track state in the DB.

    The DBBackup record is moved through NEW -> BUILDING -> COMPLETED
    (or FAILED) and updated with the resulting checksum and location.
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    backup = DBBackup.find_by(id=backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.NEW, backup.instance_id)
    backup.state = BackupState.NEW
    backup.save()

    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(CONF.storage_strategy,
                                        CONF.storage_namespace)(context)

    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    backup.size = stats.get('used', 0.0)
    backup.state = BackupState.BUILDING
    backup.save()

    try:
        with runner(filename=backup_id, user=user, password=password)\
                as bkup:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s",
                     backup_id, bkup.content_length)
            LOG.info('Backup %s swift checksum: %s', backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                # BUG FIX: raise with the note returned by storage.save();
                # backup.note is only assigned on the success path below,
                # so the old BackupError(backup.note) raised with a stale
                # (unset) value.
                raise BackupError(note)
    except Exception:
        # LOG.exception keeps the traceback; LOG.error(e) dropped it.
        LOG.exception("Error saving %s Backup", backup_id)
        backup.state = BackupState.FAILED
        backup.save()
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.checksum = checksum
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.save()
def snapshot_for_replication(self, context, service, location,
                             snapshot_info):
    """Stream a replication snapshot and return (snapshot_id, log_position).

    The returned log position is an empty dict: with streamed
    InnobackupEx the position is embedded in the stream and will be
    decoded by the slave.
    """
    storage_cls = get_storage_strategy(CONF.storage_strategy,
                                       CONF.storage_namespace)
    AGENT.stream_backup_to_storage(snapshot_info, REPL_BACKUP_RUNNER,
                                   storage_cls(context), {},
                                   REPL_EXTRA_OPTS)
    return snapshot_info['id'], {}
def execute_restore(self, context, backup_id, restore_location):
    """Clean the restore location, then restore backup ``backup_id`` there.

    Looks the backup record up in the DB to determine the runner type,
    streams the backup down from storage, and re-owns the restored files
    to the mysql user.
    """
    try:
        LOG.debug("Cleaning out restore location: %s", restore_location)
        # NOTE(review): 0777 makes the directory world-writable; presumably
        # needed so the restore runner can write as another user — confirm.
        utils.execute_with_timeout("sudo", "chmod", "-R",
                                   "0777", restore_location)
        utils.clean_out(restore_location)
        LOG.debug("Finding backup %s to restore", backup_id)
        backup = DBBackup.find_by(id=backup_id)
        LOG.debug("Getting Restore Runner of type %s", backup.backup_type)
        restore_runner = self._get_restore_runner(backup.backup_type)
        LOG.debug("Getting Storage Strategy")
        storage_strategy = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        LOG.debug("Preparing storage to download stream.")
        download_stream = storage_strategy.load(context,
                                                backup.location,
                                                restore_runner.is_zipped,
                                                backup.checksum)
        with restore_runner(restore_stream=download_stream,
                            restore_location=restore_location) as runner:
            LOG.debug("Restoring instance from backup %s to %s",
                      backup_id, restore_location)
            content_size = runner.restore()
            LOG.info("Restore from backup %s completed successfully to %s",
                     backup_id, restore_location)
            LOG.info("Restore size: %s", content_size)
            utils.execute_with_timeout("sudo", "chown", "-R",
                                       "mysql", restore_location)
    except Exception:
        # LOG.exception keeps the traceback; LOG.error(e) dropped it.
        LOG.exception("Error restoring backup %s", backup_id)
        raise
    else:
        LOG.info("Restored Backup %s", backup_id)
def snapshot_for_replication(self, context, service, location,
                             snapshot_info):
    """Stream a replication snapshot and provision a replication user.

    Returns (snapshot_id, log_position). The log position carries only
    the replication user: with streamed InnobackupEx the actual binlog
    position is embedded in the stream and decoded by the slave.
    """
    storage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)
    AGENT.stream_backup_to_storage(snapshot_info, REPL_BACKUP_RUNNER,
                                   storage, {}, REPL_EXTRA_OPTS)

    repl_user = self._create_replication_user()
    service.grant_replication_privilege(repl_user)

    return snapshot_info['id'], {'replication_user': repl_user}
def execute_restore(self, context, backup_info, restore_location):
    """Restore an instance from a backup and report COMPLETED to conductor.

    :param backup_info: dict with 'id', 'type', 'location', 'checksum';
                        'restore_location' is added to it here for logging.
    :param restore_location: filesystem path to restore into.
    :raises: re-raises any failure after logging it.
    """
    try:
        LOG.debug("Getting Restore Runner %(type)s.", backup_info)
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy.")
        storage = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        runner = restore_runner(storage, location=backup_info['location'],
                                checksum=backup_info['checksum'],
                                restore_location=restore_location)
        backup_info['restore_location'] = restore_location
        LOG.debug("Restoring instance from backup %(id)s to "
                  "%(restore_location)s.", backup_info)
        content_size = runner.restore()
        # rds-start: report the backup as COMPLETED via the conductor,
        # using an admin proxy context.
        backup_id = backup_info['id']
        ctxt = trove_context.TroveContext(
            user=CONF.nova_proxy_admin_user,
            auth_token=CONF.nova_proxy_admin_pass)
        conductor = conductor_api.API(ctxt)
        backup_state = {'backup_id': backup_id,
                        'state': BackupState.COMPLETED}
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup_state)
        # rds-end
        LOG.debug("Restore from backup %(id)s completed successfully "
                  "to %(restore_location)s.", backup_info)
        LOG.debug("Restore size: %s.", content_size)
    except Exception:
        # Lazy args instead of eager "%": formatting only happens when
        # the record is emitted, and a '%' in the values cannot raise.
        LOG.exception(_("Error restoring backup %(id)s."), backup_info)
        raise
    else:
        LOG.debug("Restored backup %(id)s.", backup_info)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to swift, and report state via conductor.

    Progress (BUILDING / FAILED / COMPLETED) is reported to the conductor
    under CONF.guest_id using an admin proxy context.
    """
    LOG.debug("Searching for backup instance %s", backup_info['id'])
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)

    LOG.info("Running backup %s", backup_info['id'])
    user = ADMIN_USER_NAME
    password = get_auth_password()
    # PEP 8: snake_case for local names (was swiftStorage).
    swift_storage = get_storage_strategy(CONF.storage_strategy,
                                         CONF.storage_namespace)(context)

    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    conductor.update_backup(CONF.guest_id,
                            backup_id=backup_info['id'],
                            size=stats.get('used', 0.0),
                            state=BackupState.BUILDING)

    with runner(filename=backup_info['id'], extra_opts=extra_opts,
                user=user, password=password) as bkup:
        try:
            LOG.info("Starting Backup %s", backup_info['id'])
            success, note, checksum, location = swift_storage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s",
                     backup_info['id'], success)
            LOG.info("Backup %s file size: %s",
                     backup_info['id'], bkup.content_length)
            LOG.info('Backup %s file swift checksum: %s',
                     backup_info['id'], checksum)
            LOG.info('Backup %s location: %s', backup_info['id'], location)
            if not success:
                raise BackupError(note)
        except Exception:
            # LOG.exception keeps the traceback; LOG.error(e) dropped it.
            LOG.exception("Error saving %s Backup", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    state=BackupState.FAILED)
            raise
        else:
            LOG.info("Saving %s Backup Info to model", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    checksum=checksum,
                                    location=location,
                                    note=note,
                                    backup_type=bkup.backup_type,
                                    state=BackupState.COMPLETED)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS):
    """Run a (possibly incremental) backup and report state via conductor.

    Streams the backup to the configured storage strategy and reports
    BUILDING / FAILED / COMPLETED under CONF.guest_id through the
    conductor, using an admin proxy context.
    """
    backup_id = backup_info['id']
    # Admin proxy context for talking to the conductor.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)

    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    # Falls back to the 'mysql' config group when no datastore manager
    # is configured.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(_("Backup %(backup_id)s completed status: "
                           "%(success)s") % backup)
                LOG.info(_("Backup %(backup_id)s file swift checksum: "
                           "%(checksum)s") % backup)
                LOG.info(_("Backup %(backup_id)s location: "
                           "%(location)s") % backup)
                if not success:
                    raise BackupError(note)
                # Persist the backup's own metadata so a later
                # incremental run can load it as parent metadata.
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Failure while saving the stream: mark FAILED, re-raise.
                LOG.exception(_("Error saving %(backup_id)s Backup")
                              % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Failure anywhere in the runner lifecycle: mark FAILED, re-raise.
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to storage, and report state via conductor."""
    backup_id = backup_info['id']
    # Admin proxy context for conductor calls.
    admin_ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(admin_ctxt)

    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)

    # Capture filesystem usage before the backup starts.
    fs_stats = get_filesystem_volume_stats(CONF.mount_point)
    backup_fields = {
        'backup_id': backup_id,
        'size': fs_stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id, **backup_fields)

    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    user=user, password=password) as backup_file:
            try:
                LOG.info("Starting Backup %s", backup_id)
                success, note, checksum, location = storage.save(
                    backup_file.manifest, backup_file)
                LOG.info("Backup %s completed status: %s",
                         backup_id, success)
                LOG.info('Backup %s file swift checksum: %s',
                         backup_id, checksum)
                LOG.info('Backup %s location: %s', backup_id, location)
                backup_fields['checksum'] = checksum
                backup_fields['location'] = location
                backup_fields['note'] = note
                backup_fields['backup_type'] = backup_file.backup_type
                if not success:
                    raise BackupError(note)
            except Exception:
                # Save failed: record FAILED state and propagate.
                LOG.exception("Error saving %s Backup", backup_id)
                backup_fields['state'] = BackupState.FAILED
                conductor.update_backup(CONF.guest_id, **backup_fields)
                raise
    except Exception:
        # Runner setup/teardown failed: record FAILED state and propagate.
        LOG.exception("Error running backup: %s", backup_id)
        backup_fields['state'] = BackupState.FAILED
        conductor.update_backup(CONF.guest_id, **backup_fields)
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup_fields['state'] = BackupState.COMPLETED
        conductor.update_backup(CONF.guest_id, **backup_fields)
def execute_backup(self, context, backup_info,
                   runner=RUNNER, extra_opts=EXTRA_OPTS):
    """Run a (possibly incremental) backup and report state via conductor.

    Streams the backup to the configured storage strategy and reports
    BUILDING / FAILED / COMPLETED under CONF.guest_id through the
    conductor, using an admin proxy context.
    """
    backup_id = backup_info['id']
    # Admin proxy context for talking to the conductor.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)

    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(CONF.storage_strategy,
                                   CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    # Falls back to the 'mysql' config group when no datastore manager
    # is configured.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(
                    _("Backup %(backup_id)s completed status: "
                      "%(success)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s location: "
                      "%(location)s") % backup)
                if not success:
                    raise BackupError(note)
                # Persist the backup's own metadata so a later
                # incremental run can load it as parent metadata.
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Failure while saving the stream: mark FAILED, re-raise.
                LOG.exception(
                    _("Error saving %(backup_id)s Backup") % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Failure anywhere in the runner lifecycle: mark FAILED, re-raise.
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)