def inner_retry(self, *args):
    u"""Call ``fn(self, *args)``, retrying on non-fatal errors.

    Closure variables (defined in the enclosing decorator, not visible
    here): ``fn`` is the wrapped backend operation, ``operation`` its
    name, ``fatal`` whether final failure should abort the program.
    """
    for n in range(1, globals.num_retries + 1):
        try:
            return fn(self, *args)
        except FatalBackendException as e:
            # die on fatal errors
            raise e
        except Exception as e:
            # retry on anything else
            log.Debug(
                _(u"Backtrace of previous error: %s") % exception_traceback())
            at_end = n == globals.num_retries
            code = _get_code_from_exception(self.backend, operation, e)
            if code == log.ErrorCode.backend_not_found:
                # If we tried to do something, but the file just isn't there,
                # no need to retry.
                at_end = True
            if at_end and fatal:
                # Build a human-readable description of the failed call for
                # the fatal-error report.
                def make_filename(f):
                    if isinstance(f, path.ROPath):
                        return util.escape(f.uc_name)
                    else:
                        return util.escape(f)
                extra = u' '.join([operation] + [
                    make_filename(x) for x in args
                    if (x and isinstance(x, str))
                ])
                # NOTE(review): log.FatalError is raised here rather than
                # just called — presumably it is an exception type or
                # raises itself; confirm against the log module.
                raise log.FatalError(
                    _(u"Giving up after %s attempts. %s: %s")
                    % (n, e.__class__.__name__, util.uexc(e)),
                    code=code, extra=extra)
            else:
                log.Warn(
                    _(u"Attempt %s failed. %s: %s")
                    % (n, e.__class__.__name__, util.uexc(e)))
            if not at_end:
                if isinstance(e, TemporaryLoadException):
                    time.sleep(3 * globals.backend_retry_delay)  # wait longer before trying again
                else:
                    time.sleep(globals.backend_retry_delay)  # wait a bit before trying again
                # Give the backend a chance to reset state between attempts.
                if hasattr(self.backend, u'_retry_cleanup'):
                    self.backend._retry_cleanup()
def handle_error(self, e, op, file1=None, file2=None):
    """Translate exception *e* from operation *op* into a log event.

    Known errno values map to specific backend error codes; 'delete'
    and 'query' failures are only warnings, everything else is fatal.
    """
    errno_to_code = {
        errno.EACCES: log.ErrorCode.backend_permission_denied,
        errno.ENOENT: log.ErrorCode.backend_not_found,
        errno.ENOSPC: log.ErrorCode.backend_no_space,
    }
    code = log.ErrorCode.backend_error
    if hasattr(e, 'errno'):
        code = errno_to_code.get(e.errno, log.ErrorCode.backend_error)
    # Describe the operation and any filenames involved.
    names = ' '.join([util.escape(x) for x in [file1, file2] if x])
    extra = ' '.join([op, names])
    if op not in ('delete', 'query'):
        log.FatalError(util.uexc(e), code, extra)
    else:
        log.Warn(util.uexc(e), code, extra)
def __init__(self, parsed_url):
    """Hubic backend: authenticate pyrax via ~/.hubic_credentials and
    open (or create) the container named by the URL path."""
    duplicity.backend.Backend.__init__(self, parsed_url)
    try:
        import pyrax
    except ImportError as e:
        raise BackendException("""\
Hubic backend requires the pyrax library available from Rackspace.
Exception: %s""" % str(e))

    # Inform Pyrax that we're talking to Hubic
    pyrax.set_setting(
        "identity_type",
        "duplicity.backends.pyrax_identity.hubic.HubicIdentity")

    CREDENTIALS_FILE = os.path.expanduser("~/.hubic_credentials")
    if os.path.exists(CREDENTIALS_FILE):
        try:
            pyrax.set_credential_file(CREDENTIALS_FILE)
        except Exception as e:
            log.FatalError(
                "Connection failed, please check your credentials: %s %s"
                % (e.__class__.__name__, util.uexc(e)),
                log.ErrorCode.connection_failed)
    else:
        raise BackendException("No ~/.hubic_credentials file found.")

    container = parsed_url.path.lstrip('/')

    # Cache pyrax exception classes for use by error-handling code.
    self.client_exc = pyrax.exceptions.ClientException
    self.nso_exc = pyrax.exceptions.NoSuchObject
    self.container = pyrax.cloudfiles.create_container(container)
def request(self, *args, **kwargs):  # pylint: disable=method-hidden
    """Delegate to HTTPSConnection.request, converting SSL errors into
    BackendException so callers see a uniform failure type."""
    try:
        return httplib.HTTPSConnection.request(self, *args, **kwargs)
    except ssl.SSLError as ssl_err:
        # encapsulate ssl errors
        message = "SSL failed: %s" % util.uexc(ssl_err)
        raise BackendException(message, log.ErrorCode.backend_error)
def __init__(self, parsed_url):
    """Hubic backend constructor: configure pyrax for Hubic identity,
    load credentials from ~/.hubic_credentials, open the container."""
    duplicity.backend.Backend.__init__(self, parsed_url)
    try:
        import pyrax
    except ImportError as e:
        raise BackendException("""\
Hubic backend requires the pyrax library available from Rackspace.
Exception: %s""" % str(e))

    # Inform Pyrax that we're talking to Hubic
    pyrax.set_setting("identity_type",
                      "duplicity.backends.pyrax_identity.hubic.HubicIdentity")

    CREDENTIALS_FILE = os.path.expanduser("~/.hubic_credentials")
    if os.path.exists(CREDENTIALS_FILE):
        try:
            pyrax.set_credential_file(CREDENTIALS_FILE)
        except Exception as e:
            log.FatalError("Connection failed, please check your credentials: %s %s"
                           % (e.__class__.__name__, util.uexc(e)),
                           log.ErrorCode.connection_failed)
    else:
        raise BackendException("No ~/.hubic_credentials file found.")

    container = parsed_url.path.lstrip('/')

    # Cache pyrax exception classes for later error handling.
    self.client_exc = pyrax.exceptions.ClientException
    self.nso_exc = pyrax.exceptions.NoSuchObject
    self.container = pyrax.cloudfiles.create_container(container)
def done_with_mount(self, fileobj, result, loop):
    """GIO async-mount completion callback.

    Fix: `except GLib.GError, e:` is Python-2-only syntax; replaced
    with `except GLib.GError as e:` which is valid on 2.6+ and 3.x.
    """
    try:
        fileobj.mount_enclosing_volume_finish(result)
    except GLib.GError as e:
        # check for NOT_SUPPORTED because some schemas (e.g. file://) validly don't
        if e.code != Gio.IOErrorEnum.ALREADY_MOUNTED and e.code != Gio.IOErrorEnum.NOT_SUPPORTED:
            log.FatalError(_("Connection failed, please check your password: %s")
                           % util.uexc(e), log.ErrorCode.connection_failed)
def login(self):
    """Ensure the Dropbox session is linked; abort via FatalError if
    linking fails or the session still is not linked afterwards."""
    if self.sess.is_linked():
        return
    try:
        # attempt to link/login to the box
        self.sess.link()
    except rest.ErrorResponse as e:
        log.FatalError('dpbx Error: %s\n' % util.uexc(e),
                       log.ErrorCode.dpbx_nologin)
    if not self.sess.is_linked():
        # still not logged in
        log.FatalError("dpbx Cannot login: check your credentials",
                       log.ErrorCode.dpbx_nologin)
def done_with_mount(self, fileobj, result, loop):
    """GIO async-mount completion callback (variant).

    Fix: Python-2-only `except GLib.GError, e:` replaced with the
    portable `except GLib.GError as e:` form.
    """
    try:
        fileobj.mount_enclosing_volume_finish(result)
    except GLib.GError as e:
        # check for NOT_SUPPORTED because some schemas (e.g. file://) validly don't
        if e.code != Gio.IOErrorEnum.ALREADY_MOUNTED and e.code != Gio.IOErrorEnum.NOT_SUPPORTED:
            log.FatalError(
                _("Connection failed, please check your password: %s")
                % util.uexc(e), log.ErrorCode.connection_failed)
def login(self):
    """Link the Dropbox session if it is not already linked.

    Any failure — an ErrorResponse during linking, or the session
    remaining unlinked — is reported as a fatal login error.
    """
    if not self.sess.is_linked():
        try:
            # try to login to the box
            self.sess.link()
        except rest.ErrorResponse as err:
            log.FatalError('dpbx Error: %s\n' % util.uexc(err),
                           log.ErrorCode.dpbx_nologin)
        if not self.sess.is_linked():
            # still not logged in after the link attempt
            log.FatalError("dpbx Cannot login: check your credentials",
                           log.ErrorCode.dpbx_nologin)
def __done_with_mount(self, fileobj, result, loop):
    """Finish the async GIO mount and stop the main loop.

    ALREADY_MOUNTED and NOT_SUPPORTED are tolerated; any other GError
    is treated as a failed connection (likely a bad password).
    """
    from gi.repository import Gio  # @UnresolvedImport # pylint: disable=import-error
    from gi.repository import GLib  # @UnresolvedImport # pylint: disable=import-error

    try:
        fileobj.mount_enclosing_volume_finish(result)
    except GLib.GError as e:
        # check for NOT_SUPPORTED because some schemas (e.g. file://) validly don't
        benign = (Gio.IOErrorEnum.ALREADY_MOUNTED, Gio.IOErrorEnum.NOT_SUPPORTED)
        if e.code not in benign:
            log.FatalError(_("Connection failed, please check your password: %s")
                           % util.uexc(e), log.ErrorCode.connection_failed)
    loop.quit()
def read(self, length=-1):
    u"""Read up to *length* bytes from the wrapped file.

    An IOError is downgraded to a warning and yields b"" (end of
    stream); bytes read are accounted into the global stats object.
    """
    try:
        data = self.infile.read(length)
    except IOError as err:
        # Treat a read failure as EOF, but tell the user about it.
        data = b""
        log.Warn(
            _(u"Error %s getting delta for %s")
            % (util.uexc(err), util.fsdecode(self.infile.name)))
    if stats:
        stats.SourceFileSize += len(data)
    return data
def runremote(self, cmd, ignoreexitcode=False, errorprefix=u""):
    u"""small convenience function that opens a shell channel, runs remote
    command and returns stdout of command. throws an exception if exit
    code!=0 and not ignored"""
    try:
        _ch_in, ch_out, _ch_err = self.client.exec_command(
            cmd, -1, config.timeout)
        return ch_out.read(-1)
    except Exception as e:
        # With ignoreexitcode the failure is swallowed (returns None).
        if not ignoreexitcode:
            raise BackendException(u"%sfailed: %s \n %s" % (
                errorprefix, cmd, util.uexc(e)))
def iterate(*args):
    """Retry wrapper: call the closed-over `fn` with raise_errors=True,
    sleeping between attempts.

    Fix: Python-2-only `except Exception, e:` replaced with
    `except Exception as e:` (valid on 2.6+ and 3.x).

    NOTE(review): range(1, num_retries) makes num_retries-1 attempts and
    returns None after the last failure — preserved as-is; confirm
    against the surrounding decorator before changing.
    """
    for n in range(1, globals.num_retries):
        try:
            kwargs = {"raise_errors": True}
            return fn(*args, **kwargs)
        except Exception as e:
            log.Warn(_("Attempt %s failed: %s: %s")
                     % (n, e.__class__.__name__, util.uexc(e)))
            log.Debug(_("Backtrace of previous error: %s")
                      % exception_traceback())
            if isinstance(e, TemporaryLoadException):
                time.sleep(30)  # wait longer before trying again
            else:
                time.sleep(10)  # wait a bit before trying again
def get_remote_manifest(self):
    """
    Return manifest by reading remote manifest on backend

    Returns None (with an error logged) if the remote manifest cannot
    be processed due to a GPGError.
    """
    assert self.remote_manifest_name
    try:
        manifest_buffer = self.backend.get_data(self.remote_manifest_name)
    except GPGError as message:
        # Undecryptable/corrupt manifest: report and signal "unavailable".
        log.Error(_("Error processing remote manifest (%s): %s")
                  % (util.ufn(self.remote_manifest_name),
                     util.uexc(message)))
        return None
    log.Info(_("Processing remote manifest %s (%s)") % (
        util.ufn(self.remote_manifest_name), len(manifest_buffer)))
    return manifest.Manifest().from_string(manifest_buffer)
def handle_error(self, raise_error, e, op, file1=None, file2=None):
    """Report a GIO error: re-raise when requested, otherwise map the
    GError code to a backend error code and die with a FatalError."""
    if raise_error:
        raise e
    code = log.ErrorCode.backend_error
    if isinstance(e, GLib.GError):
        gio_to_code = {
            Gio.IOErrorEnum.PERMISSION_DENIED: log.ErrorCode.backend_permission_denied,
            Gio.IOErrorEnum.NOT_FOUND: log.ErrorCode.backend_not_found,
            Gio.IOErrorEnum.NO_SPACE: log.ErrorCode.backend_no_space,
        }
        code = gio_to_code.get(e.code, code)
    # Operation name plus the involved filenames, escaped for logging.
    names = ' '.join([util.escape(x) for x in [file1, file2] if x])
    extra = ' '.join([op, names])
    log.FatalError(util.uexc(e), code, extra)
def delta_iter_error_handler(exc, new_path, sig_path, sig_tar=None):  # pylint: disable=unused-argument
    u"""
    Called by get_delta_iter, report error in getting delta
    """
    if not (new_path or sig_path):
        assert 0, u"Both new and sig are None for some reason"
    # Prefer the new path's index; fall back to the signature path's.
    index_string = (new_path or sig_path).get_relative_path()
    log.Warn(
        _(u"Error %s getting delta for %s")
        % (util.uexc(exc), util.fsdecode(index_string)))
    return None
def inner_retry(self, *args):
    """Call ``fn(self, *args)``, retrying on non-fatal errors.

    Closure variables from the enclosing decorator: ``fn`` (wrapped
    operation), ``operation`` (its name), ``fatal`` (whether final
    failure aborts the program).
    """
    for n in range(1, globals.num_retries + 1):
        try:
            return fn(self, *args)
        except FatalBackendException as e:
            # die on fatal errors
            raise e
        except Exception as e:
            # retry on anything else
            log.Debug(_("Backtrace of previous error: %s")
                      % exception_traceback())
            at_end = n == globals.num_retries
            code = _get_code_from_exception(self.backend, operation, e)
            if code == log.ErrorCode.backend_not_found:
                # If we tried to do something, but the file just isn't there,
                # no need to retry.
                at_end = True
            if at_end and fatal:
                # Describe the failed call for the fatal-error report.
                def make_filename(f):
                    if isinstance(f, path.ROPath):
                        return util.escape(f.name)
                    else:
                        return util.escape(f)
                extra = ' '.join([operation] +
                                 [make_filename(x) for x in args if x])
                log.FatalError(_("Giving up after %s attempts. %s: %s")
                               % (n, e.__class__.__name__, util.uexc(e)),
                               code=code, extra=extra)
            else:
                log.Warn(_("Attempt %s failed. %s: %s")
                         % (n, e.__class__.__name__, util.uexc(e)))
            if not at_end:
                if isinstance(e, TemporaryLoadException):
                    time.sleep(3 * globals.backend_retry_delay)  # wait longer before trying again
                else:
                    time.sleep(globals.backend_retry_delay)  # wait a bit before trying again
                # Give the backend a chance to reset state between attempts.
                if hasattr(self.backend, '_retry_cleanup'):
                    self.backend._retry_cleanup()
def iterate(*args):
    """Retry wrapper: invoke the closed-over `fn` with raise_errors=True,
    warning and sleeping between attempts.

    Fix: Python-2-only `except Exception, e:` replaced with the
    portable `except Exception as e:` form.
    """
    for n in range(1, globals.num_retries):
        try:
            kwargs = {"raise_errors": True}
            return fn(*args, **kwargs)
        except Exception as e:
            log.Warn(
                _("Attempt %s failed: %s: %s")
                % (n, e.__class__.__name__, util.uexc(e)))
            log.Debug(
                _("Backtrace of previous error: %s")
                % exception_traceback())
            if isinstance(e, TemporaryLoadException):
                time.sleep(30)  # wait longer before trying again
            else:
                time.sleep(10)  # wait a bit before trying again
def wrapper(self, *args):
    """Dropbox call wrapper: enforce login, translate exceptions into
    logged backend errors.

    Fix: removed an unreachable `return` statement that followed the
    `raise BackendException(...)` — dead code.
    """
    if login_required and not self.sess.is_linked():
        raise BackendException("dpbx Cannot login: check your credentials",
                               log.ErrorCode.dpbx_nologin)
    try:
        return f(self, *args)
    except TypeError as e:
        log_exception(e)
        raise BackendException('dpbx type error "%s"' % (e,))
    except rest.ErrorResponse as e:
        # Prefer the server-supplied user message when present.
        msg = e.user_error_msg or util.uexc(e)
        log.Error('dpbx error: %s' % (msg,),
                  log.ErrorCode.backend_command_error)
        raise e
    except Exception as e:
        log_exception(e)
        log.Error('dpbx code error "%s"' % (e,),
                  log.ErrorCode.backend_code_error)
        raise e
def _list(self):
    u"""Return the de-duplicated union of file lists from all stores.

    NOTE(review): this mutates the global config.are_errors_fatal[u'list']
    so a failing store only warns instead of aborting — presumably
    intentional, but the setting persists after the call; confirm.
    """
    lists = []
    for s in self.__stores:
        config.are_errors_fatal[u'list'] = (False, [])
        l = s.list()
        log.Notice(
            _(u"MultiBackend: %s: %d files")
            % (s.backend.parsed_url.url_string, len(l)))
        if len(l) == 0 and duplicity.backend._last_exception:
            # list() swallowed an error (non-fatal above); surface it here.
            log.Warn(
                _(u"Exception during list of %s: %s"
                  % (s.backend.parsed_url.url_string,
                     util.uexc(duplicity.backend._last_exception))))
            duplicity.backend._last_exception = None
        lists.append(l)
    # combine the lists into a single flat list w/o duplicates via set:
    result = list({item for sublist in lists for item in sublist})
    log.Log(_(u"MultiBackend: combined list: %s")
            % (result), log.DEBUG)
    return result
def __init__(self, parsed_url):
    """Pyrax/Rackspace backend: read credentials from the environment,
    authenticate, and create the container named by the URL path.

    Fix: the implicit string concatenation in the CLOUDFILES_USERNAME
    error produced "variablenot set." — a space was missing.
    """
    duplicity.backend.Backend.__init__(self, parsed_url)
    try:
        import pyrax
    except ImportError as e:
        raise BackendException("""\
Pyrax backend requires the pyrax library available from Rackspace.
Exception: %s""" % str(e))

    # Inform Pyrax that we're talking to Rackspace
    # per Jesus Monzon (gsusmonzon)
    pyrax.set_setting("identity_type", "rackspace")

    conn_kwargs = {}

    if 'CLOUDFILES_USERNAME' not in os.environ:
        raise BackendException('CLOUDFILES_USERNAME environment variable '
                               'not set.')

    if 'CLOUDFILES_APIKEY' not in os.environ:
        raise BackendException(
            'CLOUDFILES_APIKEY environment variable not set.')

    conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME']
    conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY']

    if 'CLOUDFILES_REGION' in os.environ:
        conn_kwargs['region'] = os.environ['CLOUDFILES_REGION']

    container = parsed_url.path.lstrip('/')

    try:
        pyrax.set_credentials(**conn_kwargs)
    except Exception as e:
        log.FatalError(
            "Connection failed, please check your credentials: %s %s"
            % (e.__class__.__name__, util.uexc(e)),
            log.ErrorCode.connection_failed)

    # Cache pyrax exception classes for later error handling.
    self.client_exc = pyrax.exceptions.ClientException
    self.nso_exc = pyrax.exceptions.NoSuchObject
    self.container = pyrax.cloudfiles.create_container(container)
def unmarshall():
    u"""
    De-serializes cached progress data if present.

    Returns a fresh Snapshot when no cached data is found or the
    stored data cannot be unpickled.

    Fix: the progress file descriptor leaked when pickle.load raised;
    a `with` block now guarantees it is closed.
    """
    snapshot = Snapshot()
    # If restarting Full, discard marshalled data and start over
    if config.restart is not None and config.restart.start_vol >= 1:
        try:
            with open(u'%s/progress' % config.archive_dir_path.name,
                      u'r') as progressfd:
                snapshot = pickle.load(progressfd)
        except Exception as e:
            log.Warn(
                u"Warning, cannot read stored progress info from previous backup: {}"
                .format(util.uexc(e)),
                log.WarningCode.cannot_stat)
            snapshot = Snapshot()
    # Reached here no cached data found or wrong marshalling
    return snapshot
def _retry_fatal(self, *args): try: n = 0 for n in range(1, globals.num_retries): try: self.retry_count = n return fn(self, *args) except FatalBackendError, e: # die on fatal errors raise e except Exception, e: # retry on anything else log.Warn( _("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e))) log.Debug( _("Backtrace of previous error: %s") % exception_traceback()) time.sleep(10) # wait a bit before trying again
def __init__(self, parsed_url):
    """Cloudfiles backend: read credentials from the environment,
    connect, and create the container named by the URL path.

    Fix: the implicit string concatenation in the CLOUDFILES_USERNAME
    error produced "variablenot set." — a space was missing.
    """
    try:
        from cloudfiles import Connection
        from cloudfiles.errors import ResponseError
        from cloudfiles import consts
        from cloudfiles.errors import NoSuchObject
    except ImportError as e:
        raise BackendException("""\
Cloudfiles backend requires the cloudfiles library available from Rackspace.
Exception: %s""" % str(e))

    self.resp_exc = ResponseError
    conn_kwargs = {}

    if 'CLOUDFILES_USERNAME' not in os.environ:
        raise BackendException('CLOUDFILES_USERNAME environment variable '
                               'not set.')

    if 'CLOUDFILES_APIKEY' not in os.environ:
        raise BackendException(
            'CLOUDFILES_APIKEY environment variable not set.')

    conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME']
    conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY']

    if 'CLOUDFILES_AUTHURL' in os.environ:
        conn_kwargs['authurl'] = os.environ['CLOUDFILES_AUTHURL']
    else:
        conn_kwargs['authurl'] = consts.default_authurl

    container = parsed_url.path.lstrip('/')

    try:
        conn = Connection(**conn_kwargs)
    except Exception as e:
        log.FatalError(
            "Connection failed, please check your credentials: %s %s"
            % (e.__class__.__name__, util.uexc(e)),
            log.ErrorCode.connection_failed)
    self.container = conn.create_container(container)
def wrapper(self, *args):
    """Dropbox call wrapper: enforce login, translate exceptions into
    logged backend errors.

    Fix: removed an unreachable `return` statement that followed the
    `raise BackendException(...)` — dead code.
    """
    if login_required and not self.sess.is_linked():
        raise BackendException(
            "dpbx Cannot login: check your credentials",
            log.ErrorCode.dpbx_nologin)
    try:
        return f(self, *args)
    except TypeError as e:
        log_exception(e)
        raise BackendException('dpbx type error "%s"' % (e, ))
    except rest.ErrorResponse as e:
        # Prefer the server-supplied user message when present.
        msg = e.user_error_msg or util.uexc(e)
        log.Error('dpbx error: %s' % (msg, ),
                  log.ErrorCode.backend_command_error)
        raise e
    except Exception as e:
        log_exception(e)
        log.Error('dpbx code error "%s"' % (e, ),
                  log.ErrorCode.backend_code_error)
        raise e
def integrate_patch_iters(iter_list):
    u"""Combine a list of iterators of ropath patches

    The iter_list should be sorted in patch order, and the elements in
    each iter_list need to be orderd by index.  The output will be an
    iterator of the final ROPaths in index order.
    """
    collated = collate_iters(iter_list)
    for patch_seq in collated:
        # Drop patches made irrelevant by later ones in the sequence.
        normalized = normalize_ps(patch_seq)
        try:
            final_ropath = patch_seq2ropath(normalized)
            if final_ropath.exists():
                # otherwise final patch was delete
                yield final_ropath
        except Exception as e:
            # Patch application failed: warn and skip this path.
            filename = normalized[-1].get_ropath().get_relative_path()
            log.Warn(
                _(u"Error '%s' patching %s")
                % (util.uexc(e), util.fsdecode(filename)),
                log.WarningCode.cannot_process,
                util.escape(filename))
def integrate_patch_iters(iter_list):
    """Combine a list of iterators of ropath patches

    The iter_list should be sorted in patch order, and the elements in
    each iter_list need to be orderd by index.  The output will be an
    iterator of the final ROPaths in index order.
    """
    collated = collate_iters(iter_list)
    for patch_seq in collated:
        # Drop patches made irrelevant by later ones in the sequence.
        normalized = normalize_ps(patch_seq)
        try:
            final_ropath = patch_seq2ropath(normalized)
            if final_ropath.exists():
                # otherwise final patch was delete
                yield final_ropath
        except Exception as e:
            # Patch application failed: warn and skip this path.
            filename = normalized[-1].get_ropath().get_relative_path()
            log.Warn(_("Error '%s' patching %s")
                     % (util.uexc(e), util.fsdecode(filename)),
                     log.WarningCode.cannot_process,
                     util.escape(filename))
def __init__(self, parsed_url):
    """Cloudfiles backend constructor (variant).

    Fix: the implicit string concatenation in the CLOUDFILES_USERNAME
    error produced "variablenot set." — a space was missing.
    """
    try:
        from cloudfiles import Connection
        from cloudfiles.errors import ResponseError
        from cloudfiles import consts
        from cloudfiles.errors import NoSuchObject
    except ImportError as e:
        raise BackendException("""\
Cloudfiles backend requires the cloudfiles library available from Rackspace.
Exception: %s""" % str(e))

    self.resp_exc = ResponseError
    conn_kwargs = {}

    if 'CLOUDFILES_USERNAME' not in os.environ:
        raise BackendException('CLOUDFILES_USERNAME environment variable '
                               'not set.')

    if 'CLOUDFILES_APIKEY' not in os.environ:
        raise BackendException('CLOUDFILES_APIKEY environment variable not set.')

    conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME']
    conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY']

    if 'CLOUDFILES_AUTHURL' in os.environ:
        conn_kwargs['authurl'] = os.environ['CLOUDFILES_AUTHURL']
    else:
        conn_kwargs['authurl'] = consts.default_authurl

    container = parsed_url.path.lstrip('/')

    try:
        conn = Connection(**conn_kwargs)
    except Exception as e:
        log.FatalError("Connection failed, please check your credentials: %s %s"
                       % (e.__class__.__name__, util.uexc(e)),
                       log.ErrorCode.connection_failed)
    self.container = conn.create_container(container)
def __init__(self, parsed_url):
    """Pyrax/Rackspace backend constructor (variant without base-class init).

    Fix: the implicit string concatenation in the CLOUDFILES_USERNAME
    error produced "variablenot set." — a space was missing.
    """
    try:
        import pyrax
    except ImportError:
        raise BackendException("This backend requires the pyrax "
                               "library available from Rackspace.")

    # Inform Pyrax that we're talking to Rackspace
    # per Jesus Monzon (gsusmonzon)
    pyrax.set_setting("identity_type", "rackspace")

    conn_kwargs = {}

    if 'CLOUDFILES_USERNAME' not in os.environ:
        raise BackendException('CLOUDFILES_USERNAME environment variable '
                               'not set.')

    if 'CLOUDFILES_APIKEY' not in os.environ:
        raise BackendException('CLOUDFILES_APIKEY environment variable not set.')

    conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME']
    conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY']

    if 'CLOUDFILES_REGION' in os.environ:
        conn_kwargs['region'] = os.environ['CLOUDFILES_REGION']

    container = parsed_url.path.lstrip('/')

    try:
        pyrax.set_credentials(**conn_kwargs)
    except Exception as e:
        log.FatalError("Connection failed, please check your credentials: %s %s"
                       % (e.__class__.__name__, util.uexc(e)),
                       log.ErrorCode.connection_failed)

    # Cache pyrax exception classes for later error handling.
    self.client_exc = pyrax.exceptions.ClientException
    self.nso_exc = pyrax.exceptions.NoSuchObject
    self.container = pyrax.cloudfiles.create_container(container)
def __init__(self, parsed_url):
    """Pyrax/Rackspace backend: authenticate from the environment, then
    fetch the container (creating it only if it does not exist), with
    explicit diagnostics for permission failures on either step.

    NOTE(review): the string "can view but not create containers" was
    split across lines by extraction mangling and has been rejoined —
    confirm against the upstream source.
    """
    duplicity.backend.Backend.__init__(self, parsed_url)
    try:
        import pyrax
    except ImportError as e:
        raise BackendException("""\
Pyrax backend requires the pyrax library available from Rackspace.
Exception: %s""" % str(e))

    # Inform Pyrax that we're talking to Rackspace
    # per Jesus Monzon (gsusmonzon)
    pyrax.set_setting("identity_type", "rackspace")

    conn_kwargs = {}

    if 'CLOUDFILES_USERNAME' not in os.environ:
        raise BackendException('CLOUDFILES_USERNAME environment variable'
                               'not set.')

    if 'CLOUDFILES_APIKEY' not in os.environ:
        raise BackendException(
            'CLOUDFILES_APIKEY environment variable not set.')

    conn_kwargs['username'] = os.environ['CLOUDFILES_USERNAME']
    conn_kwargs['api_key'] = os.environ['CLOUDFILES_APIKEY']

    if 'CLOUDFILES_REGION' in os.environ:
        conn_kwargs['region'] = os.environ['CLOUDFILES_REGION']

    container = parsed_url.path.lstrip('/')

    try:
        pyrax.set_credentials(**conn_kwargs)
    except Exception as e:
        log.FatalError(
            "Connection failed, please check your credentials: %s %s"
            % (e.__class__.__name__, util.uexc(e)),
            log.ErrorCode.connection_failed)

    # Cache pyrax exception classes for later error handling.
    self.client_exc = pyrax.exceptions.ClientException
    self.nso_exc = pyrax.exceptions.NoSuchObject
    # query rackspace for the specified container name
    try:
        self.container = pyrax.cloudfiles.get_container(container)
    except pyrax.exceptions.Forbidden as e:
        # Container exists (or may exist) but we are not allowed in.
        log.FatalError(
            "%s : %s \n" % (e.__class__.__name__, util.uexc(e)) +
            "Container may exist, but access was denied.\n" +
            "If this container exists, please check its X-Container-Read/Write headers.\n" +
            "Otherwise, please check your credentials and permissions.",
            log.ErrorCode.backend_permission_denied)
    except pyrax.exceptions.NoSuchContainer as e:
        # Container missing: try to create it, which may also be denied.
        try:
            self.container = pyrax.cloudfiles.create_container(container)
        except pyrax.exceptions.Forbidden as e:
            log.FatalError(
                "%s : %s \n" % (e.__class__.__name__, util.uexc(e)) +
                "Container does not exist, but creation was denied.\n" +
                "You may be using a read-only user that can view but not create containers.\n" +
                "Please check your credentials and permissions.",
                log.ErrorCode.backend_permission_denied)
log.Warn( _("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e))) log.Debug( _("Backtrace of previous error: %s") % exception_traceback()) time.sleep(10) # wait a bit before trying again # final trial, die on exception self.retry_count = n + 1 return fn(self, *args) except Exception, e: log.Debug( _("Backtrace of previous error: %s") % exception_traceback()) log.FatalError( _("Giving up after %s attempts. %s: %s") % (self.retry_count, e.__class__.__name__, util.uexc(e)), log.ErrorCode.backend_error) self.retry_count = 0 return _retry_fatal class Backend: """ Represents a generic duplicity backend, capable of storing and retrieving files. Concrete sub-classes are expected to implement: - put - get
return fn(self, *args) except FatalBackendError, e: # die on fatal errors raise e except Exception, e: # retry on anything else log.Warn(_("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e))) log.Debug(_("Backtrace of previous error: %s") % exception_traceback()) time.sleep(10) # wait a bit before trying again # final trial, die on exception self.retry_count = n + 1 return fn(self, *args) except Exception, e: log.Debug(_("Backtrace of previous error: %s") % exception_traceback()) log.FatalError( _("Giving up after %s attempts. %s: %s") % (self.retry_count, e.__class__.__name__, util.uexc(e)), log.ErrorCode.backend_error, ) self.retry_count = 0 return _retry_fatal class Backend: """ Represents a generic duplicity backend, capable of storing and retrieving files. Concrete sub-classes are expected to implement: - put
def _retry_fatal(self, *args): try: n = 0 for n in range(1, globals.num_retries): try: self.retry_count = n return fn(self, *args) except FatalBackendError, e: # die on fatal errors raise e except Exception, e: # retry on anything else log.Warn(_("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e))) log.Debug(_("Backtrace of previous error: %s") % exception_traceback()) time.sleep(10) # wait a bit before trying again