def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Enable this instance as a replication master and return the
    snapshot descriptor a replica needs to attach.

    NOTE(review): unlike sibling variants in this file, this version
    passes None for the app/config arguments to the replication
    strategy and omits the 'volume_size' key from the dataset —
    confirm the strategy tolerates None and that callers do not
    expect 'volume_size'.
    """
    # Backups must be enabled before a replication snapshot is taken.
    self.enable_backups()
    self.replication.enable_as_master(None, None)
    snapshot_id, log_position = (
        self.replication.snapshot_for_replication(context, None, None,
                                                  snapshot_info))
    mount_point = CONF.get(self.manager).mount_point
    volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
    replication_snapshot = {
        'dataset': {
            'datastore_manager': self.manager,
            # 'used' is the dataset size in GB as reported by statvfs.
            'dataset_size': volume_stats.get('used', 0.0),
            'snapshot_id': snapshot_id
        },
        'replication_strategy': self.replication_strategy,
        'master': self.replication.get_master_ref(None, snapshot_info),
        'log_position': log_position
    }
    return replication_snapshot
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Turn this MySQL instance into a replication master and return
    the snapshot metadata required to create a replica."""
    LOG.debug("Getting replication snapshot.")
    mysql_app = self.mysql_app(self.mysql_app_status.get())
    strategy = self.replication_strategy_class(context)
    strategy.enable_as_master(mysql_app, replica_source_config)
    snapshot_id, log_position = strategy.snapshot_for_replication(
        context, mysql_app, None, snapshot_info)
    fs_stats = dbaas.get_filesystem_volume_stats(
        CONF.get(self.manager).mount_point)
    return {
        'dataset': {
            'datastore_manager': self.manager,
            'dataset_size': fs_stats.get('used', 0.0),
            'volume_size': fs_stats.get('total', 0.0),
            'snapshot_id': snapshot_id
        },
        'replication_strategy': self.replication_strategy,
        'master': strategy.get_master_ref(mysql_app, snapshot_info),
        'log_position': log_position
    }
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Configure this instance as a master and build the replication
    snapshot description for a new replica."""
    LOG.debug("Getting replication snapshot.")
    master_app = MySqlApp(MySqlAppStatus.get())
    strategy = REPLICATION_STRATEGY_CLASS(context)
    strategy.enable_as_master(master_app, replica_source_config)
    snapshot_id, log_position = strategy.snapshot_for_replication(
        context, master_app, None, snapshot_info)
    stats = dbaas.get_filesystem_volume_stats(
        CONF.get(MANAGER).mount_point)
    return {
        'dataset': {
            'datastore_manager': MANAGER,
            'dataset_size': stats.get('used', 0.0),
            'volume_size': stats.get('total', 0.0),
            'snapshot_id': snapshot_id
        },
        'replication_strategy': REPLICATION_STRATEGY,
        'master': strategy.get_master_ref(master_app, snapshot_info),
        'log_position': log_position
    }
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Promote this instance to replication master and return the
    snapshot metadata a replica needs."""
    LOG.debug("Getting replication snapshot.")
    app = self.mysql_app(self.mysql_app_status.get())
    repl = self.replication_strategy_class(context)
    repl.enable_as_master(app, replica_source_config)
    snapshot_id, log_position = repl.snapshot_for_replication(
        context, app, None, snapshot_info)
    volume_stats = dbaas.get_filesystem_volume_stats(
        CONF.get(self.manager).mount_point)
    dataset = {
        'datastore_manager': self.manager,
        'dataset_size': volume_stats.get('used', 0.0),
        'volume_size': volume_stats.get('total', 0.0),
        'snapshot_id': snapshot_id,
    }
    return {
        'dataset': dataset,
        'replication_strategy': self.replication_strategy,
        'master': repl.get_master_ref(app, snapshot_info),
        'log_position': log_position,
    }
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Enable this instance as a master and describe the snapshot a
    new replica must restore from."""
    LOG.debug("Getting replication snapshot.")
    strategy = REPLICATION_STRATEGY_CLASS(context)
    strategy.enable_as_master(self._app, replica_source_config)
    snapshot_id, log_position = strategy.snapshot_for_replication(
        context, self._app, None, snapshot_info)
    fs_stats = dbaas.get_filesystem_volume_stats(
        CONF.get(MANAGER).mount_point)
    snapshot = {
        'dataset': {
            'datastore_manager': MANAGER,
            'dataset_size': fs_stats.get('used', 0.0),
            'volume_size': fs_stats.get('total', 0.0),
            'snapshot_id': snapshot_id,
        },
        'replication_strategy': REPLICATION_STRATEGY,
        'master': strategy.get_master_ref(self._app, snapshot_info),
        'log_position': log_position,
    }
    return snapshot
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to swift, and report progress to the
    conductor.

    :param backup_info: dict holding at least the backup 'id'.
    :param runner: backup runner class, used as a context manager.
    :param extra_opts: extra command-line options for the runner.
    """
    LOG.debug("Searching for backup instance %s", backup_info['id'])
    # Conductor calls are made under the nova proxy admin identity.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info("Running backup %s", backup_info['id'])
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)
    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    conductor.update_backup(CONF.guest_id,
                            backup_id=backup_info['id'],
                            size=stats.get('used', 0.0),
                            state=BackupState.BUILDING)
    with runner(filename=backup_info['id'], extra_opts=extra_opts,
                user=user, password=password) as bkup:
        try:
            LOG.info("Starting Backup %s", backup_info['id'])
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s",
                     backup_info['id'], success)
            LOG.info("Backup %s file size: %s",
                     backup_info['id'], bkup.content_length)
            LOG.info('Backup %s file swift checksum: %s',
                     backup_info['id'], checksum)
            LOG.info('Backup %s location: %s',
                     backup_info['id'], location)
            # A falsy success flag from storage counts as a failure.
            if not success:
                raise BackupError(note)
        except Exception as e:
            LOG.error(e)
            LOG.error("Error saving %s Backup", backup_info['id'])
            # Mark the backup FAILED before re-raising to the caller.
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    state=BackupState.FAILED)
            raise
        else:
            LOG.info("Saving %s Backup Info to model", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    checksum=checksum,
                                    location=location,
                                    note=note,
                                    backup_type=bkup.backup_type,
                                    state=BackupState.COMPLETED)
def test_get_filesystem_volume_stats(self):
    """Stats derived from a mocked os.statvfs must match MockStats."""
    when(os).statvfs(any()).thenReturn(MockStats)
    stats = get_filesystem_volume_stats('/some/path/')
    # 4 KiB blocks, 1 Mi total blocks, half free -> 4 GiB / 2 GiB free.
    self.assertEqual(stats['block_size'], 4096)
    self.assertEqual(stats['total_blocks'], 1048576)
    self.assertEqual(stats['free_blocks'], 524288)
    self.assertEqual(stats['total'], 4294967296)
    self.assertEqual(stats['free'], 2147483648)
    self.assertEqual(stats['used'], 2.0)
def test_get_filesystem_volume_stats(self):
    """Verify the stat fields computed from a mocked os.statvfs."""
    when(os).statvfs(any()).thenReturn(MockStats)
    volume_stats = get_filesystem_volume_stats("/some/path/")
    self.assertEqual(volume_stats["block_size"], 4096)
    self.assertEqual(volume_stats["total_blocks"], 1048576)
    self.assertEqual(volume_stats["free_blocks"], 524288)
    # Totals are blocks * block_size; 'used' is reported in GB.
    self.assertEqual(volume_stats["total"], 4294967296)
    self.assertEqual(volume_stats["free"], 2147483648)
    self.assertEqual(volume_stats["used"], 2.0)
def _validate_slave_for_replication(self, context, replica_info):
    """Check that this instance can be attached as a replica.

    Raises IncompatibleReplicationStrategy when the master's strategy
    differs from ours, and InsufficientSpaceForReplica when the local
    volume is smaller than the master's dataset.
    """
    if (replica_info['replication_strategy'] != REPLICATION_STRATEGY):
        # NOTE(review): dict.update() returns None, so the exception
        # is constructed with None while replica_info is mutated as a
        # side effect — confirm this is the intended convention.
        raise exception.IncompatibleReplicationStrategy(
            replica_info.update({'guest_strategy': REPLICATION_STRATEGY}))
    mount_point = CONF.get(MANAGER).mount_point
    volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
    if (volume_stats.get('total', 0.0) <
            replica_info['dataset']['dataset_size']):
        # NOTE(review): same update()-returns-None pattern as above.
        raise exception.InsufficientSpaceForReplica(
            replica_info.update(
                {'slave_volume_size': volume_stats.get('total', 0.0)}))
def execute_backup(self, context, backup_id, runner=RUNNER):
    """Run a backup, stream it to swift and persist state on the
    DBBackup model.

    Fix: when the storage save reports failure, raise BackupError with
    the local ``note`` returned by ``swiftStorage.save``; the previous
    ``backup.note`` had not been assigned yet at that point, so the
    failure reason was lost (sibling implementations use ``note``).

    :param backup_id: id of the DBBackup record to run and update.
    :param runner: backup runner class, used as a context manager.
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    backup = DBBackup.find_by(id=backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.NEW, backup.instance_id)
    backup.state = BackupState.NEW
    backup.save()
    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)
    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    backup.size = stats.get('used', 0.0)
    backup.state = BackupState.BUILDING
    backup.save()
    try:
        with runner(filename=backup_id, user=user, password=password)\
                as bkup:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s",
                     backup_id, bkup.content_length)
            LOG.info('Backup %s file checksum: %s', backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                # Use the note returned by storage, not backup.note
                # (which is only assigned after a successful save).
                raise BackupError(note)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error saving %s Backup", backup_id)
        backup.state = BackupState.FAILED
        backup.save()
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.checksum = checksum
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.save()
def execute_backup(self, context, backup_id, runner=RUNNER):
    """Run a backup, stream it to swift and persist state on the
    DBBackup model.

    Fix: on a failed save, raise BackupError with the local ``note``
    returned by ``swiftStorage.save``; ``backup.note`` has not been
    assigned at that point and would lose the failure reason.

    :param backup_id: id of the DBBackup record to run and update.
    :param runner: backup runner class, used as a context manager.
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    backup = DBBackup.find_by(id=backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.NEW, backup.instance_id)
    backup.state = BackupState.NEW
    backup.save()
    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(CONF.storage_strategy,
                                        CONF.storage_namespace)(context)
    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    backup.size = stats.get('used', 0.0)
    backup.state = BackupState.BUILDING
    backup.save()
    try:
        with runner(filename=backup_id, user=user, password=password)\
                as bkup:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s",
                     backup_id, bkup.content_length)
            LOG.info('Backup %s swift checksum: %s', backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                # Use the note returned by storage, not backup.note
                # (only assigned after a successful save).
                raise BackupError(note)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error saving %s Backup", backup_id)
        backup.state = BackupState.FAILED
        backup.save()
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.checksum = checksum
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.save()
def _validate_slave_for_replication(self, context, replica_info):
    """Check that this instance can be attached as a replica of the
    master described by ``replica_info``.

    Raises IncompatibleReplicationStrategy on a strategy mismatch and
    InsufficientSpaceForReplica when the local volume is smaller than
    the master's dataset size.
    """
    if (replica_info['replication_strategy'] !=
            self.replication_strategy):
        # NOTE(review): dict.update() returns None, so the exception
        # receives None while replica_info is mutated as a side
        # effect — confirm this is the intended convention.
        raise exception.IncompatibleReplicationStrategy(
            replica_info.update({
                'guest_strategy': self.replication_strategy
            }))
    mount_point = CONF.get(self.manager).mount_point
    volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
    if (volume_stats.get('total', 0.0) <
            replica_info['dataset']['dataset_size']):
        # NOTE(review): same update()-returns-None pattern as above.
        raise exception.InsufficientSpaceForReplica(
            replica_info.update({
                'slave_volume_size': volume_stats.get('total', 0.0)
            }))
def _validate_slave_for_replication(self, context, snapshot):
    """Check that this instance can become a replica for the master
    snapshot given.

    Raises IncompatibleReplicationStrategy on a strategy mismatch and
    InsufficientSpaceForReplica when the local volume is too small.
    """
    if (snapshot['replication_strategy'] != REPLICATION_STRATEGY):
        # NOTE(review): dict.update() returns None, so the exception
        # is constructed with None; the snapshot dict is mutated as a
        # side effect — confirm this is intended.
        raise exception.IncompatibleReplicationStrategy(
            snapshot.update({
                'guest_strategy': REPLICATION_STRATEGY
            }))
    mount_point = CONF.get(MANAGER).mount_point
    volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
    if (volume_stats.get('total', 0.0) <
            snapshot['dataset']['dataset_size']):
        # NOTE(review): same update()-returns-None pattern as above.
        raise exception.InsufficientSpaceForReplica(
            snapshot.update({
                'slave_volume_size': volume_stats.get('total', 0.0)
            }))
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Build the snapshot descriptor needed to attach a new replica
    to this master."""
    LOG.debug("Getting replication snapshot.")
    snapshot_id, log_position = self.replication.snapshot_for_replication(
        context, self.app, None, snapshot_info)
    stats = dbaas.get_filesystem_volume_stats(
        CONF.get(self.manager).mount_point)
    return {
        'dataset': {
            'datastore_manager': self.manager,
            'dataset_size': stats.get('used', 0.0),
            'volume_size': stats.get('total', 0.0),
            'snapshot_id': snapshot_id
        },
        'replication_strategy': self.replication_strategy,
        'master': self.replication.get_master_ref(self.app, snapshot_info),
        'log_position': log_position,
        'replica_number': snapshot_info['replica_number']
    }
def get_replication_snapshot(self, context, snapshot_info,
                             replica_source_config=None):
    """Describe the replication snapshot for a new replica of this
    master instance."""
    LOG.debug("Getting replication snapshot.")
    snap_id, log_pos = self.replication.snapshot_for_replication(
        context, self.app, None, snapshot_info)
    mount = CONF.get(self.manager).mount_point
    fs = dbaas.get_filesystem_volume_stats(mount)
    snapshot = {
        'dataset': {
            'datastore_manager': self.manager,
            'dataset_size': fs.get('used', 0.0),
            'volume_size': fs.get('total', 0.0),
            'snapshot_id': snap_id,
        },
        'replication_strategy': self.replication_strategy,
        'master': self.replication.get_master_ref(self.app, snapshot_info),
        'log_position': log_pos,
        'replica_number': snapshot_info['replica_number'],
    }
    return snapshot
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore's mount point.

    Note: fs_path is ignored; the configured mount point is used.
    """
    datastore = "mysql" if not MANAGER else MANAGER
    return dbaas.get_filesystem_volume_stats(
        CONF.get(datastore).mount_point)
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a (possibly incremental) backup, stream it to storage, and
    report state transitions to the conductor.

    :param backup_info: dict with 'id' and optionally 'parent' info.
    :param runner: backup runner class; replaced by INCREMENTAL_RUNNER
                   when a parent backup is present.
    :param extra_opts: extra command-line options for the runner.
    """
    backup_id = backup_info['id']
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(_("Backup %(backup_id)s completed status: "
                           "%(success)s") % backup)
                LOG.info(_("Backup %(backup_id)s file swift checksum: "
                           "%(checksum)s") % backup)
                LOG.info(_("Backup %(backup_id)s location: "
                           "%(location)s") % backup)
                # A falsy success flag from storage counts as failure.
                if not success:
                    raise BackupError(note)
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Inner handler: failure while saving the stream.
                LOG.exception(_("Error saving %(backup_id)s Backup")
                              % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Outer handler: runner setup/teardown failures as well.
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
def get_filesystem_stats(self, context, fs_path):
    """Return filesystem volume stats for the given path."""
    stats = dbaas.get_filesystem_volume_stats(fs_path)
    return stats
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore mount point.

    Note: fs_path is ignored; the configured mount point is used.
    """
    LOG.debug("Finding the file-systems stats.")
    return dbaas.get_filesystem_volume_stats(
        CONF.get(MANAGER).mount_point)
def stream_backup_to_storage(self, context, backup_info, runner, storage,
                             parent_metadata=None, extra_opts=EXTRA_OPTS):
    """Stream a backup to storage, keeping the conductor updated.

    Fix: replace the mutable default argument ``parent_metadata={}``
    with a ``None`` sentinel (the shared-dict default is a classic
    Python pitfall); behavior for all callers is unchanged.

    :param backup_info: dict with 'id', 'datastore' and
                        'datastore_version'.
    :param runner: backup runner class, used as a context manager.
    :param storage: storage strategy to save the stream and metadata.
    :param parent_metadata: metadata of the parent backup when this is
                            an incremental backup.
    :returns: the metadata dict saved alongside the backup.
    """
    if parent_metadata is None:
        parent_metadata = {}
    backup_id = backup_info['id']
    conductor = conductor_api.API(context)
    # Store the size of the filesystem before the backup.
    mount_point = CONFIG_MANAGER.mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
    with runner(filename=backup_id, extra_opts=extra_opts,
                **parent_metadata) as bkup:
        try:
            LOG.debug("Starting backup %s.", backup_id)
            success, note, checksum, location = storage.save(
                bkup.manifest, bkup)
            backup_state.update({
                'checksum': checksum,
                'location': location,
                'note': note,
                'success': success,
                'backup_type': bkup.backup_type,
            })
            LOG.debug("Backup %(backup_id)s completed status: "
                      "%(success)s.", backup_state)
            LOG.debug("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s.", backup_state)
            LOG.debug("Backup %(backup_id)s location: "
                      "%(location)s.", backup_state)
            # A falsy success flag from storage counts as failure.
            if not success:
                raise BackupError(note)
            meta = bkup.metadata()
            meta['datastore'] = backup_info['datastore']
            meta['datastore_version'] = backup_info['datastore_version']
            storage.save_metadata(location, meta)
            backup_state.update({'state': BackupState.COMPLETED})
            return meta
        except Exception:
            LOG.exception(
                _("Error saving backup: %(backup_id)s.") % backup_state)
            backup_state.update({'state': BackupState.FAILED})
            raise
        finally:
            # Always push the final state to the conductor.
            LOG.info(_("Completed backup %(backup_id)s.") % backup_state)
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
                                    **backup_state)
            LOG.debug("Updated state for %s to %s.",
                      backup_id, backup_state)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for this datastore's mount point.

    Note: fs_path is ignored; the configured mount point is used.
    """
    return dbaas.get_filesystem_volume_stats(
        CONF.get(self.manager).mount_point)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore's configured mount point.

    TODO(peterstac) - note that fs_path is not used in this method.
    """
    mp = CONF.get(self.manager).mount_point
    LOG.debug("Getting file system stats for '%s'", mp)
    stats = dbaas.get_filesystem_volume_stats(mp)
    return stats
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a (possibly incremental) backup, stream it to storage, and
    report state transitions to the conductor.

    :param backup_info: dict with 'id' and optionally 'parent' info.
    :param runner: backup runner class; replaced by INCREMENTAL_RUNNER
                   when a parent backup is present.
    :param extra_opts: extra command-line options for the runner.
    """
    backup_id = backup_info['id']
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(CONF.storage_strategy,
                                   CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(
                    _("Backup %(backup_id)s completed status: "
                      "%(success)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s location: "
                      "%(location)s") % backup)
                # A falsy success flag from storage counts as failure.
                if not success:
                    raise BackupError(note)
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Inner handler: failure while saving the stream.
                LOG.exception(
                    _("Error saving %(backup_id)s Backup") % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Outer handler: covers runner setup/teardown failures too.
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the configured datastore mount point.

    Note: fs_path is ignored; the configured mount point is used.
    """
    return dbaas.get_filesystem_volume_stats(
        CONF.get(CONF.datastore_manager).mount_point)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the MongoDB mount point.

    TODO(peterstac) - why is this hard-coded?
    """
    LOG.debug("Getting file system status.")
    stats = dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT)
    return stats
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the MongoDB mount point.

    Note: fs_path is ignored; the MongoDB mount point is used.
    """
    stats = dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT)
    return stats
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore's configured mount point.

    Fix: pass the mount point as a lazy logging argument instead of
    eagerly interpolating with ``%`` — the string is only formatted
    when debug logging is enabled (standard logging/oslo.log idiom).

    TODO(peterstac) - note that fs_path is not used in this method.
    """
    mount_point = CONF.get(self.manager).mount_point
    LOG.debug("Getting file system stats for '%s'", mount_point)
    return dbaas.get_filesystem_volume_stats(mount_point)
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to storage, and report state changes
    to the conductor.

    :param backup_info: dict holding at least the backup 'id'.
    :param runner: backup runner class, used as a context manager.
    :param extra_opts: extra command-line options for the runner.
    """
    backup_id = backup_info['id']
    # Conductor calls are made under the nova proxy admin identity.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info("Running backup %s", backup_id)
    user = ADMIN_USER_NAME
    password = get_auth_password()
    storage = get_storage_strategy(
        CONF.storage_strategy, CONF.storage_namespace)(context)
    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id, **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    user=user, password=password) as bkup:
            try:
                LOG.info("Starting Backup %s", backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                LOG.info("Backup %s completed status: %s",
                         backup_id, success)
                LOG.info('Backup %s file swift checksum: %s',
                         backup_id, checksum)
                LOG.info('Backup %s location: %s', backup_id, location)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'backup_type': bkup.backup_type,
                })
                # A falsy success flag from storage counts as failure.
                if not success:
                    raise BackupError(note)
            except Exception:
                # Inner handler: failure while saving the stream.
                LOG.exception("Error saving %s Backup", backup_id)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id, **backup)
                raise
    except Exception:
        # Outer handler: covers runner setup/teardown failures too.
        LOG.exception("Error running backup: %s", backup_id)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id, **backup)
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id, **backup)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore's mount point.

    Note: fs_path is ignored; the configured mount point is used.
    """
    LOG.debug("Get Filesystem Stats.")
    datastore = 'mysql' if not MANAGER else MANAGER
    return dbaas.get_filesystem_volume_stats(
        CONF.get(datastore).mount_point)
def get_filesystem_stats(self, context, fs_path):
    """Return filesystem volume stats computed from fs_path."""
    stats = dbaas.get_filesystem_volume_stats(fs_path)
    return stats
def get_filesystem_stats(self, context, fs_path):
    """Return the filesystem volume stats for the given path."""
    volume_stats = dbaas.get_filesystem_volume_stats(fs_path)
    return volume_stats
def stream_backup_to_storage(self, context, backup_info, runner, storage,
                             parent_metadata={}, extra_opts=EXTRA_OPTS):
    """Stream a backup to storage, keeping the conductor updated.

    NOTE(review): ``parent_metadata={}`` is a mutable default
    argument; it is only read here, but a None sentinel would be the
    safer idiom.

    :param backup_info: dict with 'id', 'datastore' and
                        'datastore_version'.
    :param runner: backup runner class, used as a context manager.
    :param storage: storage strategy to save the stream and metadata.
    :returns: the metadata dict saved alongside the backup.
    """
    backup_id = backup_info['id']
    conductor = conductor_api.API(context)
    # Store the size of the filesystem before the backup.
    mount_point = CONFIG_MANAGER.mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
    with runner(filename=backup_id, extra_opts=extra_opts,
                **parent_metadata) as bkup:
        try:
            LOG.debug("Starting backup %s.", backup_id)
            success, note, checksum, location = storage.save(
                bkup.manifest, bkup)
            backup_state.update({
                'checksum': checksum,
                'location': location,
                'note': note,
                'success': success,
                'backup_type': bkup.backup_type,
            })
            LOG.debug("Backup %(backup_id)s completed status: "
                      "%(success)s.", backup_state)
            LOG.debug("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s.", backup_state)
            LOG.debug("Backup %(backup_id)s location: "
                      "%(location)s.", backup_state)
            # A falsy success flag from storage counts as failure.
            if not success:
                raise BackupError(note)
            meta = bkup.metadata()
            meta['datastore'] = backup_info['datastore']
            meta['datastore_version'] = backup_info[
                'datastore_version']
            storage.save_metadata(location, meta)
            backup_state.update({'state': BackupState.COMPLETED})
            return meta
        except Exception:
            LOG.exception(
                _("Error saving backup: %(backup_id)s.") % backup_state)
            backup_state.update({'state': BackupState.FAILED})
            raise
        finally:
            # Always push the final state to the conductor.
            LOG.info(_("Completed backup %(backup_id)s.") % backup_state)
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
                                    **backup_state)
            LOG.debug("Updated state for %s to %s.",
                      backup_id, backup_state)
def get_filesystem_stats(self, context, fs_path):
    """Return volume stats for the datastore's mount point.

    Fix: pass fs_path as a lazy logging argument instead of eagerly
    interpolating with ``%`` — the message is only formatted when
    debug logging is enabled (standard logging/oslo.log idiom).

    Note: fs_path is only logged; the configured mount point is used.
    """
    LOG.debug("In get_filesystem_stats: fs_path= %s", fs_path)
    mount_point = CONF.get(
        'mysql' if not MANAGER else MANAGER).mount_point
    return dbaas.get_filesystem_volume_stats(mount_point)
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup, stream it to swift, and report progress to the
    conductor.

    :param backup_info: dict holding at least the backup 'id'.
    :param runner: backup runner class, used as a context manager.
    :param extra_opts: extra command-line options for the runner.
    """
    LOG.debug("Searching for backup instance %s", backup_info['id'])
    # Conductor calls are made under the nova proxy admin identity.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info("Running backup %s", backup_info['id'])
    user = ADMIN_USER_NAME
    password = get_auth_password()
    swiftStorage = get_storage_strategy(CONF.storage_strategy,
                                        CONF.storage_namespace)(context)
    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    conductor.update_backup(CONF.guest_id,
                            backup_id=backup_info['id'],
                            size=stats.get('used', 0.0),
                            state=BackupState.BUILDING)
    with runner(filename=backup_info['id'], extra_opts=extra_opts,
                user=user, password=password) as bkup:
        try:
            LOG.info("Starting Backup %s", backup_info['id'])
            success, note, checksum, location = swiftStorage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s",
                     backup_info['id'], success)
            LOG.info("Backup %s file size: %s",
                     backup_info['id'], bkup.content_length)
            LOG.info('Backup %s file swift checksum: %s',
                     backup_info['id'], checksum)
            LOG.info('Backup %s location: %s',
                     backup_info['id'], location)
            # A falsy success flag from storage counts as failure.
            if not success:
                raise BackupError(note)
        except Exception as e:
            LOG.error(e)
            LOG.error("Error saving %s Backup", backup_info['id'])
            # Mark the backup FAILED before re-raising to the caller.
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    state=BackupState.FAILED)
            raise
        else:
            LOG.info("Saving %s Backup Info to model", backup_info['id'])
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_info['id'],
                                    checksum=checksum,
                                    location=location,
                                    note=note,
                                    backup_type=bkup.backup_type,
                                    state=BackupState.COMPLETED)