def _notifier(self): ''' Returns the notification for Trove API or TaskManager, otherwise returns an API to the conductor to whom to forward the notification ''' return (self.context.notification if self.context.notification.server_type in ['api', 'taskmanager'] else conductor_api.API(self.context))
def report_root(self, user):
    """Use conductor to update the root-enable status."""
    LOG.debug("Casting report_root message to conductor.")
    # Conductor calls are made with the nova proxy admin credentials.
    admin_context = context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    api = conductor_api.API(admin_context)
    api.report_root(CONF.guest_id, user)
    LOG.debug("Successfully cast report_root.")
def set_status(self, status):
    """Use conductor to update the DB app status."""
    LOG.debug("Casting set_status message to conductor.")
    # Conductor calls are made with the nova proxy admin credentials.
    admin_context = context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    payload = {'service_status': status.description}
    conductor_api.API(admin_context).heartbeat(CONF.guest_id, payload)
    LOG.debug("Successfully cast set_status.")
    self.status = status
def set_status(self, status):
    """Use conductor to update the DB app status."""
    LOG.debug("Casting set_status message to conductor.")
    ctxt = trove_context.TroveContext()
    payload = {'service_status': status.description}
    # The 'sent' timestamp lets the conductor discard stale heartbeats.
    conductor_api.API(ctxt).heartbeat(
        CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def set_status(self, status, force=False):
    """Use conductor to update the DB app status."""
    # Until prepare has installed the datastore, a heartbeat would
    # report a meaningless status — skip it unless forced.
    if not force and not self.is_installed:
        LOG.debug("Prepare has not completed yet, skipping heartbeat.")
        return
    LOG.debug("Casting set_status message to conductor "
              "(status is '%s')." % status.description)
    ctxt = trove_context.TroveContext()
    payload = {'service_status': status.description}
    conductor_api.API(ctxt).heartbeat(
        CONF.guest_id, payload, sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def set_status(self, status, force=False):
    """Use conductor to update the DB app status.

    Heartbeats are skipped while the current status is still NEW or
    BUILDING (prepare has not run), unless the caller forces the
    update or the new status must always be reported (FAILED or
    BUILD_PENDING).

    :param status: new service status to report and record locally.
    :param force: when True, send the heartbeat unconditionally.
    """
    # Statuses that must reach the conductor even before prepare runs.
    force_heartbeat_status = status in (
        instance.ServiceStatuses.FAILED,
        instance.ServiceStatuses.BUILD_PENDING)
    if (not force_heartbeat_status and not force and
            self.status in (instance.ServiceStatuses.NEW,
                            instance.ServiceStatuses.BUILDING)):
        LOG.debug("Prepare has not run yet, skipping heartbeat.")
        return

    # Lazy %-args: the message is only formatted if debug is enabled.
    LOG.debug("Casting set_status message to conductor (status is '%s').",
              status.description)
    context = trove_context.TroveContext()
    heartbeat = {
        'service_status': status.description,
    }
    # The 'sent' timestamp lets the conductor discard stale heartbeats.
    conductor_api.API(context).heartbeat(CONF.guest_id, heartbeat,
                                         sent=timeutils.float_utcnow())
    LOG.debug("Successfully cast set_status.")
    self.status = status
def stream_backup_to_storage(self, context, backup_info, runner, storage,
                             parent_metadata=None, extra_opts=EXTRA_OPTS):
    """Run a backup and stream it to storage, reporting state via conductor.

    Reports BUILDING before the backup starts; the terminal state
    (COMPLETED or FAILED) is always reported in the finally block.

    :param context: request context used for the conductor API.
    :param backup_info: dict with at least 'id', 'datastore' and
        'datastore_version'.
    :param runner: backup runner context-manager class.
    :param storage: storage strategy used to save data and metadata.
    :param parent_metadata: incremental-backup parent info forwarded to
        the runner; defaults to no parent.
    :param extra_opts: extra options passed to the runner.
    :returns: the metadata dict stored alongside the backup.
    :raises BackupError: when the storage save reports failure.
    """
    # None sentinel instead of a mutable {} default, which is shared
    # across calls and can leak parent info between backups.
    if parent_metadata is None:
        parent_metadata = {}

    backup_id = backup_info['id']
    conductor = conductor_api.API(context)

    # Store the size of the filesystem before the backup.
    mount_point = CONFIG_MANAGER.mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)

    with runner(filename=backup_id, extra_opts=extra_opts,
                **parent_metadata) as bkup:
        try:
            LOG.debug("Starting backup %s.", backup_id)
            success, note, checksum, location = storage.save(
                bkup.manifest, bkup)
            backup_state.update({
                'checksum': checksum,
                'location': location,
                'note': note,
                'success': success,
                'backup_type': bkup.backup_type,
            })
            LOG.debug("Backup %(backup_id)s completed status: "
                      "%(success)s.", backup_state)
            LOG.debug("Backup %(backup_id)s file swift checksum: "
                      "%(checksum)s.", backup_state)
            LOG.debug("Backup %(backup_id)s location: "
                      "%(location)s.", backup_state)
            if not success:
                raise BackupError(note)
            meta = bkup.metadata()
            meta['datastore'] = backup_info['datastore']
            meta['datastore_version'] = backup_info['datastore_version']
            storage.save_metadata(location, meta)
            backup_state.update({'state': BackupState.COMPLETED})
            return meta
        except Exception:
            LOG.exception(
                _("Error saving backup: %(backup_id)s.") % backup_state)
            backup_state.update({'state': BackupState.FAILED})
            raise
        finally:
            # Always push the final state, success or failure.
            LOG.info(_("Completed backup %(backup_id)s.") % backup_state)
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
                                    **backup_state)
            LOG.debug("Updated state for %s to %s.", backup_id,
                      backup_state)
def report_root(self, context, user):
    """Use conductor to update the root-enable status."""
    LOG.debug("Casting report_root message to conductor.")
    api = conductor_api.API(context)
    api.report_root(CONF.guest_id, user)
    LOG.debug("Successfully cast report_root.")
def create_backup(self, context, backup_info):
    """Run a database backup inside a docker container.

    Builds the backup tool's command line, reports BUILDING state to
    the conductor, runs the backup container and finally reports the
    terminal state (COMPLETED/FAILED) in the ``finally`` block.

    :param context: request context; its token and project id are
        passed to the backup tool for Swift access.
    :param backup_info: dict with at least 'id', 'datastore' and
        'datastore_version'; an optional 'parent' entry (with
        'location' and 'checksum') triggers an incremental backup.
    :raises exception.TroveError: when running the backup fails.
    """
    storage_driver = CONF.storage_strategy
    backup_driver = cfg.get_configuration_property('backup_strategy')
    incremental = ''
    backup_type = 'full'
    if backup_info.get('parent'):
        # Incremental backup: point the tool at the parent's artifacts.
        incremental = (
            f'--incremental '
            f'--parent-location={backup_info["parent"]["location"]} '
            f'--parent-checksum={backup_info["parent"]["checksum"]}')
        backup_type = 'incremental'

    backup_id = backup_info["id"]
    image = CONF.backup_docker_image
    name = 'db_backup'
    volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}}
    admin_pass = self.get_auth_password()
    user_token = context.auth_token
    auth_url = CONF.service_credentials.auth_url
    user_tenant = context.project_id
    metadata = f'datastore:{backup_info["datastore"]},' \
               f'datastore_version:{backup_info["datastore_version"]}'
    command = (
        f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'--db-user=os_admin --db-password={admin_pass} '
        f'--db-host=127.0.0.1 '
        f'--os-token={user_token} --os-auth-url={auth_url} '
        f'--os-tenant-id={user_tenant} '
        f'--swift-extra-metadata={metadata} '
        f'{incremental}')

    # Update backup status in db
    conductor = conductor_api.API(context)
    mount_point = CONF.get(CONF.datastore_manager).mount_point
    stats = guestagent_utils.get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
        'backup_type': backup_type
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.utcnow_ts(microsecond=True),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)

    # Start to run backup inside a separate docker container
    try:
        LOG.info('Starting to create backup %s, command: %s',
                 backup_id, command)
        output, ret = docker_util.run_container(self.docker_client,
                                                image,
                                                name,
                                                volumes=volumes,
                                                command=command)
        # Only the container's last log line carries the result.
        result = output[-1]
        if not ret:
            msg = f'Failed to run backup container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        # Parse checksum/location out of the final log line.
        backup_result = BACKUP_LOG.match(result)
        if backup_result:
            backup_state.update({
                'checksum': backup_result.group('checksum'),
                'location': backup_result.group('location'),
                'success': True,
                'state': BackupState.COMPLETED,
            })
        else:
            # NOTE(review): unparsable output marks the backup FAILED
            # without raising here; the finally block still reports it.
            backup_state.update({
                'success': False,
                'state': BackupState.FAILED,
            })
    except Exception as err:
        LOG.error("Failed to create backup %s", backup_id)
        backup_state.update({
            'success': False,
            'state': BackupState.FAILED,
        })
        raise exception.TroveError(
            "Failed to create backup %s, error: %s" %
            (backup_id, str(err)))
    finally:
        # Always push the final state, success or failure.
        LOG.info("Completed backup %s.", backup_id)
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.utcnow_ts(microsecond=True),
                                **backup_state)
        LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def create_backup(self, context, backup_info, volumes_mapping=None,
                  need_dbuser=True, extra_params=''):
    """Create a backup by running the backup tool in a docker container.

    Reports BUILDING to the conductor before starting and the terminal
    state (COMPLETED/FAILED) in the ``finally`` block.

    :param context: request context; its token and project id are
        handed to the backup tool for Swift access.
    :param backup_info: dict with at least 'id', 'datastore' and
        'datastore_version'; optional 'parent' (incremental) and
        'swift_container' entries.
    :param volumes_mapping: docker volume mapping for the backup
        container; defaults to no volumes. A ``None`` sentinel replaces
        the previous mutable ``{}`` default, which is shared between
        calls.
    :param need_dbuser: when True, pass local admin DB credentials to
        the backup tool.
    :param extra_params: extra command-line parameters for the tool.
    :raises exception.TroveError: when the backup fails.
    """
    if volumes_mapping is None:
        volumes_mapping = {}

    storage_driver = CONF.storage_strategy
    backup_driver = self.get_backup_strategy()
    incremental = ''
    backup_type = 'full'
    if backup_info.get('parent'):
        # Incremental backup: reference the parent's artifacts.
        incremental = (
            f'--incremental '
            f'--parent-location={backup_info["parent"]["location"]} '
            f'--parent-checksum={backup_info["parent"]["checksum"]}')
        backup_type = 'incremental'

    name = 'db_backup'
    backup_id = backup_info["id"]
    image = self.get_backup_image()
    os_cred = (f"--os-token={context.auth_token} "
               f"--os-auth-url={CONF.service_credentials.auth_url} "
               f"--os-tenant-id={context.project_id}")

    db_userinfo = ''
    if need_dbuser:
        admin_pass = self.get_auth_password()
        # Use localhost to avoid host access verification.
        db_userinfo = (f"--db-host=localhost --db-user=os_admin "
                       f"--db-password={admin_pass}")

    swift_metadata = (
        f'datastore:{backup_info["datastore"]},'
        f'datastore_version:{backup_info["datastore_version"]}'
    )
    swift_container = (backup_info.get('swift_container') or
                      CONF.backup_swift_container)
    swift_params = (f'--swift-extra-metadata={swift_metadata} '
                    f'--swift-container={swift_container}')

    command = (
        f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'{os_cred} '
        f'{db_userinfo} '
        f'{swift_params} '
        f'{incremental} '
        f'{extra_params}'
    )

    # Update backup status in db
    conductor = conductor_api.API(context)
    mount_point = cfg.get_configuration_property('mount_point')
    stats = guestagent_utils.get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
        'backup_type': backup_type
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.utcnow_ts(microsecond=True),
                            **backup_state)
    LOG.debug(f"Updated state for backup {backup_id} to {backup_state}")

    # Start to run backup inside a separate docker container
    try:
        LOG.info(f'Starting to create backup {backup_id}, '
                 f'command: {command}')
        output, ret = docker_util.run_container(
            self.docker_client, image, name,
            volumes=volumes_mapping, command=command)
        # Only the container's last log line carries the result.
        result = output[-1]
        if not ret:
            msg = f'Failed to run backup container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        backup_result = BACKUP_LOG_RE.match(result)
        if backup_result:
            backup_state.update({
                'checksum': backup_result.group('checksum'),
                'location': backup_result.group('location'),
                'success': True,
                'state': BackupState.COMPLETED,
            })
        else:
            msg = f'Cannot parse backup output: {result}'
            LOG.error(msg)
            backup_state.update({
                'success': False,
                'state': BackupState.FAILED,
            })
            # The exception message is visible to the user
            user_msg = msg
            ex_regex = re.compile(r'.+Exception: (.+)')
            for line in output[-5:-1]:
                m = ex_regex.search(line)
                if m:
                    user_msg = m.group(1)
                    break
            raise Exception(user_msg)
    except Exception as err:
        LOG.error("Failed to create backup %s", backup_id)
        backup_state.update({
            'success': False,
            'state': BackupState.FAILED,
        })
        raise exception.TroveError(
            "Failed to create backup %s, error: %s" %
            (backup_id, str(err))
        )
    finally:
        # Always push the final state, success or failure.
        LOG.info("Completed backup %s.", backup_id)
        conductor.update_backup(
            CONF.guest_id,
            sent=timeutils.utcnow_ts(microsecond=True),
            **backup_state)
        LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def execute_backup(self, context, backup_info,
                   runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup with the given runner and stream it to storage.

    Progress and terminal state are reported to the conductor with an
    admin (nova proxy) context; the storage strategy itself uses the
    caller's context.

    :param context: user request context, used to build the storage
        strategy.
    :param backup_info: dict with at least 'id'; an optional 'parent'
        entry (with 'location' and 'checksum') switches to the
        incremental runner.
    :param runner: backup runner context-manager class.
    :param extra_opts: extra options handed to the runner.
    :raises BackupError: when the storage save reports failure.
    """
    backup_id = backup_info['id']
    ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(ctxt)

    LOG.info(_("Running backup %(id)s") % backup_info)
    storage = get_storage_strategy(
        CONF.storage_strategy,
        CONF.storage_namespace)(context)

    # Check if this is an incremental backup and grab the parent metadata
    parent_metadata = {}
    if backup_info.get('parent'):
        runner = INCREMENTAL_RUNNER
        LOG.info(_("Using incremental runner: %s") % runner.__name__)
        parent = backup_info['parent']
        parent_metadata = storage.load_metadata(parent['location'],
                                                parent['checksum'])
        # The parent could be another incremental backup so we need to
        # reset the location and checksum to *this* parents info
        parent_metadata.update({
            'parent_location': parent['location'],
            'parent_checksum': parent['checksum']
        })

    # Store the size of the filesystem before the backup.
    # Fall back to the 'mysql' config group when no datastore manager
    # is configured.
    mount_point = CONF.get(
        'mysql' if not CONF.datastore_manager
        else CONF.datastore_manager).mount_point
    stats = get_filesystem_volume_stats(mount_point)
    backup = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.float_utcnow(),
                            **backup)

    try:
        with runner(filename=backup_id, extra_opts=extra_opts,
                    **parent_metadata) as bkup:
            try:
                LOG.info(_("Starting Backup %s"), backup_id)
                success, note, checksum, location = storage.save(
                    bkup.manifest,
                    bkup)
                backup.update({
                    'checksum': checksum,
                    'location': location,
                    'note': note,
                    'success': success,
                    'backup_type': bkup.backup_type,
                })
                LOG.info(_("Backup %(backup_id)s completed status: "
                           "%(success)s") % backup)
                LOG.info(_("Backup %(backup_id)s file swift checksum: "
                           "%(checksum)s") % backup)
                LOG.info(_("Backup %(backup_id)s location: "
                           "%(location)s") % backup)
                if not success:
                    raise BackupError(note)
                storage.save_metadata(location, bkup.metadata())
            except Exception:
                # Failure while saving the backup data itself.
                LOG.exception(
                    _("Error saving %(backup_id)s Backup") % backup)
                backup.update({'state': BackupState.FAILED})
                conductor.update_backup(CONF.guest_id,
                                        sent=timeutils.float_utcnow(),
                                        **backup)
                raise
    except Exception:
        # Failure in the runner setup/teardown (or re-raised from above).
        LOG.exception(_("Error running backup: %(backup_id)s") % backup)
        backup.update({'state': BackupState.FAILED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
        raise
    else:
        LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
        backup.update({'state': BackupState.COMPLETED})
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.float_utcnow(),
                                **backup)
def execute_backup(self, context, backup_info,
                   runner=RUNNER,
                   extra_opts=EXTRA_OPTS):
    """Run a backup with the given runner and save it to swift storage.

    State transitions (BUILDING, then COMPLETED or FAILED) are reported
    to the conductor with an admin (nova proxy) context.
    """
    backup_id = backup_info['id']
    LOG.debug("Searching for backup instance %s", backup_id)
    admin_ctxt = trove_context.TroveContext(
        user=CONF.nova_proxy_admin_user,
        auth_token=CONF.nova_proxy_admin_pass)
    conductor = conductor_api.API(admin_ctxt)
    LOG.info("Running backup %s", backup_id)

    user = ADMIN_USER_NAME
    password = get_auth_password()
    storage_cls = get_storage_strategy(CONF.storage_strategy,
                                       CONF.storage_namespace)
    storage = storage_cls(context)

    # Store the size of the filesystem before the backup.
    stats = get_filesystem_volume_stats(CONF.mount_point)
    conductor.update_backup(CONF.guest_id,
                            backup_id=backup_id,
                            size=stats.get('used', 0.0),
                            state=BackupState.BUILDING)

    with runner(filename=backup_id, extra_opts=extra_opts,
                user=user, password=password) as bkup:
        try:
            LOG.info("Starting Backup %s", backup_id)
            success, note, checksum, location = storage.save(
                BACKUP_CONTAINER, bkup)
            LOG.info("Backup %s completed status: %s", backup_id, success)
            LOG.info("Backup %s file size: %s", backup_id,
                     bkup.content_length)
            LOG.info('Backup %s file swift checksum: %s',
                     backup_id, checksum)
            LOG.info('Backup %s location: %s', backup_id, location)
            if not success:
                raise BackupError(note)
        except Exception as e:
            LOG.error(e)
            LOG.error("Error saving %s Backup", backup_id)
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_id,
                                    state=BackupState.FAILED)
            raise
        else:
            LOG.info("Saving %s Backup Info to model", backup_id)
            conductor.update_backup(CONF.guest_id,
                                    backup_id=backup_id,
                                    checksum=checksum,
                                    location=location,
                                    note=note,
                                    backup_type=bkup.backup_type,
                                    state=BackupState.COMPLETED)