def set_status(self, status):
    """Report this app's service status to the DB via the conductor."""
    LOG.debug("Casting set_status message to conductor.")
    ctx = trove_context.TroveContext()
    payload = {"service_status": status.description}
    api = conductor_api.API(ctx)
    # Timestamp the heartbeat so the conductor can discard stale updates.
    api.heartbeat(CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def test_heartbeat_older_timestamp_discarded(self):
    """A heartbeat whose 'sent' timestamp is not newer must be ignored."""
    new_payload = {'service_status': ServiceStatuses.NEW.description}
    building_payload = {'service_status': ServiceStatuses.BUILDING.description}
    iss_id = self._create_iss()
    iss = self._get_iss(iss_id)
    # Both heartbeats carry the same stale timestamp; only the first
    # (NEW) should take effect.
    stale = timeutils.float_utcnow() - 60
    self.cond_mgr.heartbeat(None, self.instance_id, new_payload, sent=stale)
    self.cond_mgr.heartbeat(None, self.instance_id, building_payload,
                            sent=stale)
    iss = self._get_iss(iss_id)
    self.assertEqual(ServiceStatuses.NEW, iss.status)
def test_heartbeat_newer_timestamp_accepted(self):
    """A heartbeat with a strictly newer timestamp must update the status."""
    new_payload = {'service_status': ServiceStatuses.NEW.description}
    building_payload = {'service_status': ServiceStatuses.BUILDING.description}
    iss_id = self._create_iss()
    iss = self._get_iss(iss_id)
    now = timeutils.float_utcnow()
    # Second heartbeat is 60s newer, so BUILDING should win.
    self.cond_mgr.heartbeat(None, self.instance_id, new_payload, sent=now)
    self.cond_mgr.heartbeat(None, self.instance_id, building_payload,
                            sent=now + 60)
    iss = self._get_iss(iss_id)
    self.assertEqual(ServiceStatuses.BUILDING, iss.status)
def set_status(self, status):
    """Push the app's DB status to the conductor as a heartbeat."""
    LOG.debug("Casting set_status message to conductor.")
    conductor_api.API(trove_context.TroveContext()).heartbeat(
        CONF.guest_id,
        {'service_status': status.description},
        sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def test_backup_older_timestamp_discarded(self):
    """A backup update carrying an older timestamp must not apply."""
    first_name, second_name = "oldname", "renamed"
    bkup_id = self._create_backup(first_name)
    bkup = self._get_backup(bkup_id)
    now = timeutils.float_utcnow()
    # Second update is 60s older than the first, so the rename is dropped.
    self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                sent=now, name=first_name)
    self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                sent=now - 60, name=second_name)
    bkup = self._get_backup(bkup_id)
    self.assertEqual(first_name, bkup.name)
def test_backup_newer_timestamp_accepted(self):
    """A backup update with a newer timestamp must apply."""
    first_name, second_name = "oldname", "renamed"
    bkup_id = self._create_backup(first_name)
    bkup = self._get_backup(bkup_id)
    now = timeutils.float_utcnow()
    # Second update is 60s newer, so the rename should take effect.
    self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                sent=now, name=first_name)
    self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                sent=now + 60, name=second_name)
    bkup = self._get_backup(bkup_id)
    self.assertEqual(second_name, bkup.name)
def set_status(self, status):
    """Report service status to the DB via the conductor.

    The heartbeat is cast under the nova proxy admin credentials.
    """
    LOG.debug("Casting set_status message to conductor.")
    admin_ctxt = context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    payload = {'service_status': status.description}
    conductor_api.API(admin_ctxt).heartbeat(
        CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def set_status(self, status, force=False):
    """Use conductor to update the DB app status.

    Skips the heartbeat until the app is installed, unless *force* is set.
    """
    # Guard clause: before prepare completes, heartbeats are suppressed.
    if not (force or self.is_installed):
        LOG.debug("Prepare has not completed yet, skipping heartbeat.")
        return
    LOG.debug("Casting set_status message to conductor "
              "(status is '%s')." % status.description)
    ctx = trove_context.TroveContext()
    payload = {'service_status': status.description}
    conductor_api.API(ctx).heartbeat(
        CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def execute_restore(self, context, backup_info, restore_location):
    """Restore a backup onto this instance and report the result.

    :param context: context handed to the storage strategy.
    :param backup_info: dict describing the backup; reads 'type',
        'location', 'checksum' and 'id', and writes 'restore_location'
        into it as a side effect (used by the log format strings).
    :param restore_location: path the backup is restored into.
    :raises: re-raises any exception from the runner/storage after
        logging it via LOG.exception.
    """
    try:
        LOG.debug("Getting Restore Runner %(type)s.", backup_info)
        restore_runner = self._get_restore_runner(backup_info['type'])
        LOG.debug("Getting Storage Strategy.")
        storage = get_storage_strategy(
            CONF.storage_strategy,
            CONF.storage_namespace)(context)
        runner = restore_runner(storage, location=backup_info['location'],
                                checksum=backup_info['checksum'],
                                restore_location=restore_location)
        # Mutates the caller's dict so the %(restore_location)s log
        # placeholders below resolve.
        backup_info['restore_location'] = restore_location
        LOG.debug("Restoring instance from backup %(id)s to "
                  "%(restore_location)s.", backup_info)
        content_size = runner.restore()
        #rds-start
        # NOTE(review): the rds markers suggest a vendored patch — after a
        # successful restore, mark the backup COMPLETED via the conductor,
        # using a context built from the nova proxy admin credentials.
        backup_id = backup_info['id']
        ctxt = trove_context.TroveContext(
            user=CONF.nova_proxy_admin_user,
            auth_token=CONF.nova_proxy_admin_pass)
        conductor = conductor_api.API(ctxt)
        backup_state = {'backup_id': backup_id,
                        'state': BackupState.COMPLETED,}
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup_state)
        #rds-end
        LOG.debug("Restore from backup %(id)s completed successfully "
                  "to %(restore_location)s.", backup_info)
        LOG.debug("Restore size: %s.", content_size)
    except Exception:
        LOG.exception(_("Error restoring backup %(id)s.") % backup_info)
        raise
    else:
        # Only runs when no exception was raised above.
        LOG.debug("Restored backup %(id)s." % backup_info)
def set_status(self, status, force=False):
    """Use conductor to update the DB app status.

    Heartbeats are suppressed before prepare has run (status NEW or
    BUILDING), unless *force* is set or the new status is one that must
    always be reported (FAILED, BUILD_PENDING).
    """
    must_report = status in (instance.ServiceStatuses.FAILED,
                             instance.ServiceStatuses.BUILD_PENDING)
    pre_prepare = self.status in (instance.ServiceStatuses.NEW,
                                  instance.ServiceStatuses.BUILDING)
    if pre_prepare and not force and not must_report:
        LOG.debug("Prepare has not run yet, skipping heartbeat.")
        return
    LOG.debug("Casting set_status message to conductor (status is '%s')."
              % status.description)
    ctx = trove_context.TroveContext()
    payload = {'service_status': status.description}
    conductor_api.API(ctx).heartbeat(
        CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def stream_backup_to_storage(self, context, backup_info, runner, storage,
                             parent_metadata=None, extra_opts=EXTRA_OPTS):
    """Run *runner* to create a backup and stream it into *storage*.

    Reports BUILDING, then COMPLETED or FAILED, to the conductor; the
    final state is always sent from the ``finally`` block.

    :param context: context used to build the conductor API client.
    :param backup_info: dict; reads 'id', 'datastore' and
        'datastore_version'.
    :param runner: backup runner context-manager class.
    :param storage: storage strategy with save()/save_metadata().
    :param parent_metadata: extra kwargs for *runner* (incremental
        backups); defaults to no parent.
    :param extra_opts: passed through to *runner*.
    :returns: the metadata dict saved alongside the backup.
    :raises BackupError: when storage.save reports failure.
    """
    # Fix: the original used a mutable default ({}), which is shared
    # across calls; use None as the sentinel instead.
    if parent_metadata is None:
        parent_metadata = {}
    backup_id = backup_info['id']
    conductor = conductor_api.API(context)

    # Store the size of the filesystem before the backup.
    mount_point = CONFIG_MANAGER.mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)

    with runner(filename=backup_id, extra_opts=extra_opts,
                **parent_metadata) as bkup:
        try:
            LOG.debug("Starting backup %s.", backup_id)
            success, note, checksum, location = storage.save(
                bkup.manifest, bkup)
            backup_state.update({
                'checksum': checksum,
                'location': location,
                'note': note,
                'success': success,
                'backup_type': bkup.backup_type,
            })
            LOG.debug(
                "Backup %(backup_id)s completed status: "
                "%(success)s.", backup_state)
            LOG.debug(
                "Backup %(backup_id)s file swift checksum: "
                "%(checksum)s.", backup_state)
            LOG.debug("Backup %(backup_id)s location: "
                      "%(location)s.", backup_state)
            if not success:
                raise BackupError(note)
            meta = bkup.metadata()
            meta['datastore'] = backup_info['datastore']
            meta['datastore_version'] = backup_info['datastore_version']
            storage.save_metadata(location, meta)
            backup_state.update({'state': BackupState.COMPLETED})
            return meta
        except Exception:
            LOG.exception(
                _("Error saving backup: %(backup_id)s.") % backup_state)
            backup_state.update({'state': BackupState.FAILED})
            raise
        finally:
            # Always push the final state (COMPLETED or FAILED).
            LOG.info(_("Completed backup %(backup_id)s.") % backup_state)
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
                                    **backup_state)
            LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup and report its lifecycle to the conductor.

    Reports BUILDING first, then COMPLETED on success; on failure the
    FAILED state is reported from both the inner and outer handlers
    before the exception propagates.

    :param context: context passed to the storage strategy.
    :param backup_info: dict; reads 'id' and (optionally) 'parent'.
    :param runner: backup runner class; replaced by INCREMENTAL_RUNNER
        when a parent backup is present.
    :param extra_opts: passed through to *runner*.
    :raises BackupError: when storage.save reports failure.
    """
    backup_id = backup_info['id']
    # Conductor calls use a context built from the nova proxy admin
    # credentials, not the incoming request context.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    # Falls back to the 'mysql' config group when no datastore manager
    # is configured.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(_("Backup %(backup_id)s completed status: "
                           "%(success)s") % backup)
                LOG.info(_("Backup %(backup_id)s file swift checksum: "
                           "%(checksum)s") % backup)
                LOG.info(_("Backup %(backup_id)s location: "
                           "%(location)s") % backup)
                if not success:
                    raise BackupError(note)
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Inner handler: storage/save failed; report FAILED and
                # re-raise (the outer handler reports FAILED again).
                LOG.exception(_("Error saving %(backup_id)s Backup")
                              % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Outer handler: also catches failures from the runner context
        # manager itself (enter/exit).
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
def stream_backup_to_storage(self, context, backup_info, runner, storage,
                             parent_metadata=None, extra_opts=EXTRA_OPTS):
    """Run *runner* to create a backup and stream it into *storage*.

    Reports BUILDING, then COMPLETED or FAILED, to the conductor; the
    final state is always sent from the ``finally`` block.

    :param context: context used to build the conductor API client.
    :param backup_info: dict; reads 'id', 'datastore' and
        'datastore_version'.
    :param runner: backup runner context-manager class.
    :param storage: storage strategy with save()/save_metadata().
    :param parent_metadata: extra kwargs for *runner* (incremental
        backups); defaults to no parent.
    :param extra_opts: passed through to *runner*.
    :returns: the metadata dict saved alongside the backup.
    :raises BackupError: when storage.save reports failure.
    """
    # Fix: the original used a mutable default ({}), which is shared
    # across calls; use None as the sentinel instead.
    if parent_metadata is None:
        parent_metadata = {}
    backup_id = backup_info['id']
    conductor = conductor_api.API(context)

    # Store the size of the filesystem before the backup.
    mount_point = CONFIG_MANAGER.mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)

    with runner(filename=backup_id, extra_opts=extra_opts,
                **parent_metadata) as bkup:
        try:
            LOG.debug("Starting backup %s.", backup_id)
            success, note, checksum, location = storage.save(
                bkup.manifest, bkup)
            backup_state.update({
                'checksum': checksum,
                'location': location,
                'note': note,
                'success': success,
                'backup_type': bkup.backup_type,
            })
            LOG.debug("Backup %(backup_id)s completed status: "
                      "%(success)s.", backup_state)
            LOG.debug("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s.", backup_state)
            LOG.debug("Backup %(backup_id)s location: "
                      "%(location)s.", backup_state)
            if not success:
                raise BackupError(note)
            meta = bkup.metadata()
            meta['datastore'] = backup_info['datastore']
            meta['datastore_version'] = backup_info[
                'datastore_version']
            storage.save_metadata(location, meta)
            backup_state.update({'state': BackupState.COMPLETED})
            return meta
        except Exception:
            LOG.exception(
                _("Error saving backup: %(backup_id)s.") % backup_state)
            backup_state.update({'state': BackupState.FAILED})
            raise
        finally:
            # Always push the final state (COMPLETED or FAILED).
            LOG.info(_("Completed backup %(backup_id)s.") % backup_state)
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
                                    **backup_state)
            LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def execute_backup(self, context, backup_info, runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup and report its lifecycle to the conductor.

    Reports BUILDING first, then COMPLETED on success; on failure the
    FAILED state is reported from both the inner and outer handlers
    before the exception propagates.

    :param context: context passed to the storage strategy.
    :param backup_info: dict; reads 'id' and (optionally) 'parent'.
    :param runner: backup runner class; replaced by INCREMENTAL_RUNNER
        when a parent backup is present.
    :param extra_opts: passed through to *runner*.
    :raises BackupError: when storage.save reports failure.
    """
    backup_id = backup_info['id']
    # Conductor calls use a context built from the nova proxy admin
    # credentials, not the incoming request context.
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)
    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(CONF.storage_strategy,
                                   CONF.storage_namespace)(context)
    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })
    # Store the size of the filesystem before the backup.
    # Falls back to the 'mysql' config group when no datastore manager
    # is configured.
    mount_point = CONF.get('mysql' if not CONF.datastore_manager
                           else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)
    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest, bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(
                    _("Backup %(backup_id)s completed status: "
                      "%(success)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s") % backup)
                LOG.info(
                    _("Backup %(backup_id)s location: "
                      "%(location)s") % backup)
                if not success:
                    raise BackupError(note)
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Inner handler: storage/save failed; report FAILED and
                # re-raise (the outer handler reports FAILED again).
                LOG.exception(
                    _("Error saving %(backup_id)s Backup") % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Outer handler: also catches failures from the runner context
        # manager itself (enter/exit).
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)