Example 1
 def iterate(*args):
     for n in range(1, globals.num_retries):
         try:
             return fn(*args)
         except Exception, e:
             log.Warn("Attempt %s failed. %s: %s"
                      % (n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
             time.sleep(10) # wait a bit before trying again
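The snippets in this collection are variants of duplicity's backend retry wrappers, written for Python 2 (hence the "except Exception, e:" syntax). For orientation, here is a minimal, self-contained Python 3 sketch of the same pattern; num_retries and the plain logging calls are stand-ins for duplicity's globals and log modules, and, as in Example 22 further down, one final unguarded attempt follows the loop so that the last error reaches the caller:

    import logging
    import time
    import traceback
    from functools import wraps

    num_retries = 5  # stand-in for globals.num_retries

    def retry(fn):
        """Retry fn, sleeping between attempts; let the final failure escape."""
        @wraps(fn)
        def iterate(*args, **kwargs):
            for n in range(1, num_retries):
                try:
                    return fn(*args, **kwargs)
                except Exception as e:
                    logging.warning("Attempt %s failed. %s: %s",
                                    n, e.__class__.__name__, e)
                    logging.debug("Backtrace of previous error: %s",
                                  traceback.format_exc())
                    time.sleep(10)  # wait a bit before trying again
            return fn(*args, **kwargs)  # final trial, die on exception
        return iterate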
Example 2
 def iterate(*args):
     for n in range(1, globals.num_retries):
         try:
             kwargs = {"raise_errors": True}
             return fn(*args, **kwargs)
         except Exception, e:
             log.Warn("Attempt %s failed: %s: %s" %
                      (n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s" %
                       exception_traceback())
             if isinstance(e, TemporaryLoadException):
                 time.sleep(30)  # wait a bit before trying again
Example 3
 def iterate(*args):
     for n in range(1, globals.num_retries):
         try:
             kwargs = {"raise_errors": True}
             return fn(*args, **kwargs)
         except Exception, e:
             log.Warn(_("Attempt %s failed: %s: %s") % (n, e.__class__.__name__, util.uexc(e)))
             log.Debug(_("Backtrace of previous error: %s") % exception_traceback())
             if isinstance(e, TemporaryLoadException):
                 time.sleep(30)  # wait longer before trying again
             else:
                 time.sleep(10)  # wait a bit before trying again
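Examples 2 and 3 refine the basic loop in two ways: raise_errors=True asks the wrapped backend call to raise rather than swallow errors, and the sleep is lengthened when the failure is a TemporaryLoadException, duplicity's marker for a temporarily overloaded server. The backoff choice can live in a small helper; this is a sketch, with the 30- and 10-second delays taken from the examples above:

    import time

    class TemporaryLoadException(Exception):
        """Marker exception: the server is temporarily overloaded."""

    def sleep_before_retry(exc, base_delay=10, overload_delay=30):
        # Wait longer when the server reported temporary overload,
        # a bit otherwise, before the next attempt.
        time.sleep(overload_delay if isinstance(exc, TemporaryLoadException)
                   else base_delay)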
Example 4
 def list(self):
     for n in range(1, globals.num_retries+1):
         log.Info("Listing '%s'" % (self.container))
         try:
             keys = self.container.list_objects()
             return keys
         except self.resp_exc, resperr:
             log.Warn("Listing of '%s' failed (attempt %s): CloudFiles returned: %s %s"
                      % (self.container, n, resperr.status, resperr.reason))
         except Exception, e:
             log.Warn("Listing of '%s' failed (attempt %s): %s: %s"
                      % (self.container, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 5
        def inner_retry(self, *args):
            for n in range(1, globals.num_retries + 1):
                try:
                    return fn(self, *args)
                except FatalBackendException as e:
                    # die on fatal errors
                    raise e
                except Exception as e:
                    # retry on anything else
                    log.Debug(
                        _(u"Backtrace of previous error: %s") %
                        exception_traceback())
                    at_end = n == globals.num_retries
                    code = _get_code_from_exception(self.backend, operation, e)
                    if code == log.ErrorCode.backend_not_found:
                        # If we tried to do something, but the file just isn't there,
                        # no need to retry.
                        at_end = True
                    if at_end and fatal:

                        def make_filename(f):
                            if isinstance(f, path.ROPath):
                                return util.escape(f.uc_name)
                            else:
                                return util.escape(f)

                        extra = u' '.join([operation] + [
                            make_filename(x)
                            for x in args if (x and isinstance(x, str))
                        ])
                        log.FatalError(
                            _(u"Giving up after %s attempts. %s: %s") %
                            (n, e.__class__.__name__, util.uexc(e)),
                            code=code,
                            extra=extra)
                    else:
                        log.Warn(
                            _(u"Attempt %s failed. %s: %s") %
                            (n, e.__class__.__name__, util.uexc(e)))
                    if not at_end:
                        if isinstance(e, TemporaryLoadException):
                            time.sleep(3 * globals.backend_retry_delay)  # wait longer before trying again
                        else:
                            time.sleep(globals.backend_retry_delay)  # wait a bit before trying again
                        if hasattr(self.backend, u'_retry_cleanup'):
                            self.backend._retry_cleanup()
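Example 5 is the later, unified retry wrapper: before deciding whether to retry it translates the exception into an error code via _get_code_from_exception, and a backend_not_found result ends the retries immediately, since a missing file will not appear on the next attempt. A hypothetical classifier of that shape, using strings in place of log.ErrorCode members and assuming an optional per-backend _error_code hook, might read:

    import errno

    def get_code_from_exception(backend, operation, e):
        # Hypothetical sketch: let the backend classify the failure if it
        # knows how, special-case "no such file", and otherwise report a
        # generic backend error. This mirrors the shape of duplicity's
        # helper, not its exact logic.
        if hasattr(backend, '_error_code'):
            code = backend._error_code(operation, e)
            if code is not None:
                return code
        if isinstance(e, OSError) and e.errno == errno.ENOENT:
            return 'backend_not_found'
        return 'backend_error'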
Example 6
 def _retry_fatal(self, *args):
     try:
         n = 0
         for n in range(1, globals.num_retries):
             try:
                 self.retry_count = n
                 return fn(self, *args)
             except FatalBackendError, e:
                 # die on fatal errors
                 raise e
             except Exception, e:
                 # retry on anything else
                 log.Warn(_("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e)))
                 log.Debug(_("Backtrace of previous error: %s") % exception_traceback())
                 time.sleep(10)  # wait a bit before trying again
Example 7
 def list(self):
     for n in range(1, globals.num_retries+1):
         log.Info("Listing '%s'" % (self.container))
         try:
             # Cloud Files will return a max of 10,000 objects.  We have
             # to make multiple requests to get them all.
             headers, objs = self.conn.get_container(self.container)
             return [ o['name'] for o in objs ]
         except self.resp_exc, resperr:
             log.Warn("Listing of '%s' failed (attempt %s): Swift server returned: %s %s"
                      % (self.container, n, resperr.http_status, resperr.message))
         except Exception, e:
             log.Warn("Listing of '%s' failed (attempt %s): %s: %s"
                      % (self.container, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 8
 def _retry_fatal(self, *args):
     try:
         n = 0
         for n in range(1, globals.num_retries):
             try:
                 self.retry_count = n
                 return fn(self, *args)
             except FatalBackendError, e:
                 # die on fatal errors
                 raise e
             except Exception, e:
                 # retry on anything else
                 log.Warn("Attempt %s failed. %s: %s" %
                          (n, e.__class__.__name__, str(e)))
                 log.Debug("Backtrace of previous error: %s" %
                           exception_traceback())
                 time.sleep(10)  # wait a bit before trying again
Example 9
    def put(self, source_path, remote_filename = None):
        if not remote_filename:
            remote_filename = source_path.get_filename()

        for n in range(1, globals.num_retries + 1):
            log.Info("Uploading '%s/%s' " % (self.container, remote_filename))
            try:
                self.container.upload_file(source_path.name, remote_filename)
                return
            except self.client_exc, error:
                log.Warn("Upload of '%s' failed (attempt %d): pyrax returned: %s %s"
                         % (remote_filename, n, error.__class__.__name__, error.message))
            except Exception, e:
                log.Warn("Upload of '%s' failed (attempt %s): %s: %s"
                        % (remote_filename, n, e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s"
                          % exception_traceback())
Example 10
    def put(self, source_path, remote_filename = None):
        if not remote_filename:
            remote_filename = source_path.get_filename()

        for n in range(1, globals.num_retries+1):
            log.Info("Uploading '%s/%s' " % (self.container, remote_filename))
            try:
                sobject = self.container.create_object(remote_filename)
                sobject.load_from_filename(source_path.name)
                return
            except self.resp_exc, error:
                log.Warn("Upload of '%s' failed (attempt %d): CloudFiles returned: %s %s"
                         % (remote_filename, n, error.status, error.reason))
            except Exception, e:
                log.Warn("Upload of '%s' failed (attempt %s): %s: %s"
                        % (remote_filename, n, e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s"
                          % exception_traceback())
Example 11
    def _list(self):
        if not self.bucket:
            raise BackendException("No connection to backend")

        for n in range(1, globals.num_retries+1):
            if n > 1:
                # sleep before retry
                time.sleep(30)
            log.Info("Listing %s" % self.straight_url)
            try:
                return self._list_filenames_in_bucket()
            except Exception, e:
                log.Warn("List %s failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url,
                               n,
                               e.__class__.__name__,
                               str(e)), 1)
                log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
Example 12
    def put(self, source_path, remote_filename = None):
        if not remote_filename:
            remote_filename = source_path.get_filename()

        for n in range(1, globals.num_retries+1):
            log.Info("Uploading '%s/%s' " % (self.container, remote_filename))
            try:
                sobject = self.container.create_object(remote_filename)
                sobject.load_from_filename(source_path.name)
                return
            except self.resp_exc, error:
                log.Warn("Upload of '%s' failed (attempt %d): CloudFiles returned: %s %s"
                         % (remote_filename, n, error.status, error.reason))
            except Exception, e:
                log.Warn("Upload of '%s' failed (attempt %s): %s: %s"
                        % (remote_filename, n, e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s"
                          % exception_traceback())
Example 13
    def _list(self):
        if not self.bucket:
            raise BackendException("No connection to backend")

        for n in range(1, globals.num_retries+1):
            if n > 1:
                # sleep before retry
                time.sleep(30)
            log.Info("Listing %s" % self.straight_url)
            try:
                return self._list_filenames_in_bucket()
            except Exception, e:
                log.Warn("List %s failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url,
                               n,
                               e.__class__.__name__,
                               str(e)), 1)
                log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
Example 14
 def get(self, remote_filename, local_path):
     for n in range(1, globals.num_retries+1):
         log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
         try:
             sobject = self.container.create_object(remote_filename)
             f = open(local_path.name, 'w')
             for chunk in sobject.stream():
                 f.write(chunk)
             local_path.setdata()
             return
         except self.resp_exc, resperr:
             log.Warn("Download of '%s' failed (attempt %s): CloudFiles returned: %s %s"
                      % (remote_filename, n, resperr.status, resperr.reason))
         except Exception, e:
             log.Warn("Download of '%s' failed (attempt %s): %s: %s"
                      % (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 15
 def list(self):
     for n in range(1, globals.num_retries + 1):
         log.Info("Listing '%s'" % (self.container))
         try:
             # Cloud Files will return a max of 10,000 objects.  We have
             # to make multiple requests to get them all.
             headers, objs = self.conn.get_container(self.container)
             return [o['name'] for o in objs]
         except self.resp_exc, resperr:
             log.Warn(
                 "Listing of '%s' failed (attempt %s): Swift server returned: %s %s"
                 %
                 (self.container, n, resperr.http_status, resperr.message))
         except Exception, e:
             log.Warn("Listing of '%s' failed (attempt %s): %s: %s" %
                      (self.container, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s" %
                       exception_traceback())
Example 16
 def get(self, remote_filename, local_path):
     for n in range(1, globals.num_retries+1):
         log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
         try:
             sobject = self.container.create_object(remote_filename)
             f = open(local_path.name, 'w')
             for chunk in sobject.stream():
                 f.write(chunk)
             local_path.setdata()
             return
         except self.resp_exc, resperr:
             log.Warn("Download of '%s' failed (attempt %s): CloudFiles returned: %s %s"
                      % (remote_filename, n, resperr.status, resperr.reason))
         except Exception, e:
             log.Warn("Download of '%s' failed (attempt %s): %s: %s"
                      % (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 17
 def delete_one(self, remote_filename):
     for n in range(1, globals.num_retries + 1):
         log.Info("Deleting '%s/%s'" % (self.container, remote_filename))
         try:
             self.container.delete_object(remote_filename)
             return
         except self.client_exc, resperr:
             if n > 1 and resperr.status == 404:
                 # We failed on a timeout, but delete succeeded on the server
                 log.Warn("Delete of '%s' missing after retry - must have succeded earler" % remote_filename)
                 return
             log.Warn("Delete of '%s' failed (attempt %s): pyrax returned: %s %s"
                      % (remote_filename, n, resperr.__class__.__name__, resperr.message))
         except Exception, e:
             log.Warn("Delete of '%s' failed (attempt %s): %s: %s"
                      % (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 18
 def get(self, remote_filename, local_path):
     for n in range(1, globals.num_retries + 1):
         log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
         try:
             sobject = self.container.get_object(remote_filename)
             f = open(local_path.name, 'w')
             f.write(sobject.get())
             local_path.setdata()
             return
         except self.nso_exc:
             return
         except self.client_exc, resperr:
             log.Warn("Download of '%s' failed (attempt %s): pyrax returned: %s %s"
                      % (remote_filename, n, resperr.__class__.__name__, resperr.message))
         except Exception, e:
             log.Warn("Download of '%s' failed (attempt %s): %s: %s"
                      % (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 19
    def pre_process_download(self, files_to_download, wait=False):
        # Used primarily to move files in Glacier to S3
        if isinstance(files_to_download, basestring):
            files_to_download = [files_to_download]

        for remote_filename in files_to_download:
            success = False
            for n in range(1, globals.num_retries+1):
                if n > 1:
                    # sleep before retry (new connection, hopefully to a new host, so no need to wait as long)
                    time.sleep(10)
                    self.resetConnection()
                try:
                    key_name = self.key_prefix + remote_filename
                    if not self._listed_keys.get(key_name, False):
                        self._listed_keys[key_name] = list(self.bucket.list(key_name))[0]
                    key = self._listed_keys[key_name]

                    if key.storage_class == "GLACIER":
                        # We need to move the file out of glacier
                        if not self.bucket.get_key(key.key).ongoing_restore:
                            log.Info("File %s is in Glacier storage, restoring to S3" % remote_filename)
                            key.restore(days=1)  # Shouldn't need this again after 1 day
                        if wait:
                            log.Info("Waiting for file %s to restore from Glacier" % remote_filename)
                            while self.bucket.get_key(key.key).ongoing_restore:
                                time.sleep(60)
                                self.resetConnection()
                            log.Info("File %s was successfully restored from Glacier" % remote_filename)
                    success = True
                    break
                except Exception, e:
                    log.Warn("Restoration from Glacier for file %s/%s failed (attempt #%d, reason: %s: %s)"
                             "" % (self.straight_url,
                                   remote_filename,
                                   n,
                                   e.__class__.__name__,
                                   str(e)), 1)
                    log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
            if not success:
                log.Warn("Giving up trying to restore %s/%s after %d attempts" %
                        (self.straight_url, remote_filename, globals.num_retries))
                raise BackendException("Error restoring %s/%s from Glacier to S3" % (self.straight_url, remote_filename))
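Example 19 is the one snippet that does more than retry: for keys whose storage_class is "GLACIER" it starts a restore with key.restore(days=1) and, when wait is set, polls ongoing_restore once a minute until the object is readable from S3 again. The poll-until-done core can be factored into a generic helper; done and on_poll here are assumed callables, not duplicity APIs:

    import time

    def wait_until(done, interval=60, on_poll=None):
        # Generic poll loop: sleep between checks until done() reports True.
        while not done():
            time.sleep(interval)
            if on_poll is not None:
                on_poll()  # e.g. reset a long-lived connection, as Example 19 does

In Example 19's terms, done would check that bucket.get_key(key.key).ongoing_restore is no longer set, and on_poll would call self.resetConnection().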
Example 20
 def get(self, remote_filename, local_path):
     for n in range(1, globals.num_retries+1):
         log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
         try:
             headers, body = self.conn.get_object(self.container,
                                                  remote_filename)
             f = open(local_path.name, 'w')
             for chunk in body:
                 f.write(chunk)
             local_path.setdata()
             return
         except self.resp_exc, resperr:
             log.Warn("Download of '%s' failed (attempt %s): Swift server returned: %s %s"
                      % (remote_filename, n, resperr.http_status, resperr.message))
         except Exception, e:
             log.Warn("Download of '%s' failed (attempt %s): %s: %s"
                      % (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
Example 21
 def _list(self):
     for n in range(1, globals.num_retries + 1):
         log.Info("Listing '%s'" % (self.container))
         try:
             # Cloud Files will return a max of 10,000 objects.  We have
             # to make multiple requests to get them all.
             objs = self.container.get_object_names()
             keys = objs
             while len(objs) == 10000:
                 objs = self.container.get_object_names(marker = keys[-1])
                 keys += objs
             return keys
         except self.client_exc, resperr:
             log.Warn("Listing of '%s' failed (attempt %s): pyrax returned: %s %s"
                      % (self.container, n, resperr.__class__.__name__, resperr.message))
         except Exception, e:
             log.Warn("Listing of '%s' failed (attempt %s): %s: %s"
                      % (self.container, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s"
                       % exception_traceback())
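Example 21 works around Cloud Files' 10,000-objects-per-request cap by re-querying with marker set to the last name already seen. The same marker pagination, written against an assumed fetch(marker=None) callable that returns at most page_size names per call:

    def list_all(fetch, page_size=10000):
        # Marker-based pagination: a full page means there may be more, so
        # request the next page starting after the last name seen; a short
        # page means we are done.
        names = list(fetch())
        all_names = names
        while len(names) == page_size:
            names = list(fetch(marker=all_names[-1]))
            all_names += names
        return all_names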
Example 22
def retry_fatal(fn):
    def iterate(*args):
        for n in range(1, globals.num_retries):
            try:
                return fn(*args)
            except Exception, e:
                log.Warn("Attempt %s failed. %s: %s"
                         % (n, e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s"
                          % exception_traceback())
                time.sleep(10) # wait a bit before trying again
        # final trial, die on exception
        try:
            return fn(*args)
        except Exception, e:
            log.Debug("Backtrace of previous error: %s"
                      % exception_traceback())
            log.FatalError("Giving up after %s attempts. %s: %s"
                           % (globals.num_retries, e.__class__.__name__, str(e)),
                           log.ErrorCode.backend_error)
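Example 22 shows the original retry_fatal decorator in full, and its attempt arithmetic is easy to misread: range(1, num_retries) yields num_retries - 1 guarded attempts, and the unguarded call after the loop brings the total to exactly num_retries, with only the last failure treated as fatal. A quick check of that arithmetic:

    num_retries = 5                         # stand-in for globals.num_retries
    guarded = list(range(1, num_retries))   # attempts 1..4 are caught and retried
    assert len(guarded) + 1 == num_retries  # plus one final, fatal attempt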
Example 23
 def get(self, remote_filename, local_path):
     for n in range(1, globals.num_retries + 1):
         log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
         try:
             headers, body = self.conn.get_object(self.container,
                                                  remote_filename)
             f = open(local_path.name, 'w')
             for chunk in body:
                 f.write(chunk)
             local_path.setdata()
             return
         except self.resp_exc, resperr:
             log.Warn(
                 "Download of '%s' failed (attempt %s): Swift server returned: %s %s"
                 %
                 (remote_filename, n, resperr.http_status, resperr.message))
         except Exception, e:
             log.Warn("Download of '%s' failed (attempt %s): %s: %s" %
                      (remote_filename, n, e.__class__.__name__, str(e)))
             log.Debug("Backtrace of previous error: %s" %
                       exception_traceback())
Example 24
 def get(self, remote_filename, local_path):
     key = self.key_class(self.bucket)
     key.key = self.key_prefix + remote_filename
     for n in range(1, globals.num_retries+1):
         if n > 1:
             # sleep before retry (new connection, hopefully to a new host, so no need to wait as long)
             time.sleep(10)
         log.Info("Downloading %s/%s" % (self.straight_url, remote_filename))
         try:
             key.get_contents_to_filename(local_path.name)
             local_path.setdata()
             self.resetConnection()
             return
         except Exception, e:
             log.Warn("Download %s/%s failed (attempt #%d, reason: %s: %s)"
                      "" % (self.straight_url,
                            remote_filename,
                            n,
                            e.__class__.__name__,
                            str(e)), 1)
             log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
             self.resetConnection()
Example 25
 def get(self, remote_filename, local_path):
     key = self.key_class(self.bucket)
     key.key = self.key_prefix + remote_filename
     for n in range(1, globals.num_retries+1):
         if n > 1:
             # sleep before retry (new connection, hopefully to a new host, so no need to wait as long)
             time.sleep(10)
         log.Info("Downloading %s/%s" % (self.straight_url, remote_filename))
         try:
             key.get_contents_to_filename(local_path.name)
             local_path.setdata()
             self.resetConnection()
             return
         except Exception, e:
             log.Warn("Download %s/%s failed (attempt #%d, reason: %s: %s)"
                      "" % (self.straight_url,
                            remote_filename,
                            n,
                            e.__class__.__name__,
                            str(e)), 1)
             log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
             self.resetConnection()
Example 26
 def inner_retry(self, *args):
     for n in range(1, globals.num_retries + 1):
         try:
             return fn(self, *args)
         except FatalBackendException as e:
             # die on fatal errors
             raise e
         except Exception as e:
             # retry on anything else
             log.Debug(_("Backtrace of previous error: %s")
                       % exception_traceback())
             at_end = n == globals.num_retries
             code = _get_code_from_exception(self.backend, operation, e)
             if code == log.ErrorCode.backend_not_found:
                 # If we tried to do something, but the file just isn't there,
                 # no need to retry.
                 at_end = True
             if at_end and fatal:
                 def make_filename(f):
                     if isinstance(f, path.ROPath):
                         return util.escape(f.name)
                     else:
                         return util.escape(f)
                 extra = ' '.join([operation] + [make_filename(x) for x in args if x])
                 log.FatalError(_("Giving up after %s attempts. %s: %s")
                                % (n, e.__class__.__name__,
                                   util.uexc(e)), code=code, extra=extra)
             else:
                 log.Warn(_("Attempt %s failed. %s: %s")
                          % (n, e.__class__.__name__, util.uexc(e)))
             if not at_end:
                 if isinstance(e, TemporaryLoadException):
                     time.sleep(3 * globals.backend_retry_delay)  # wait longer before trying again
                 else:
                     time.sleep(globals.backend_retry_delay)  # wait a bit before trying again
                 if hasattr(self.backend, '_retry_cleanup'):
                     self.backend._retry_cleanup()
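Examples 5 and 26 both probe the backend for an optional _retry_cleanup method before sleeping and retrying, so a backend can repair its state between attempts simply by defining that hook. A minimal illustration, assuming a backend whose connection can be rebuilt from a stored factory:

    class ExampleBackend:
        # Illustration only: any backend that defines _retry_cleanup gets it
        # called by the retry wrapper between failed attempts.
        def __init__(self, connect):
            self._connect = connect  # factory returning a fresh connection
            self.conn = connect()

        def _retry_cleanup(self):
            # Drop the (possibly wedged) connection and build a new one.
            self.conn = self._connect()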
Example 27
class BotoBackend(duplicity.backend.Backend):
    """
    Backend for Amazon's Simple Storage Service (aka Amazon S3), through
    the use of the boto module (http://code.google.com/p/boto/).

    To make use of this backend you must set aws_access_key_id
    and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
    with your Amazon Web Services key id and secret respectively.
    Alternatively you can export the environment variables
    AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
    """

    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        import boto
        assert boto.Version >= BOTO_MIN_VERSION

        # This folds the null prefix and all null parts, which means that:
        #  //MyBucket/ and //MyBucket are equivalent.
        #  //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
        self.url_parts = filter(lambda x: x != '', parsed_url.path.split('/'))

        if self.url_parts:
            self.bucket_name = self.url_parts.pop(0)
        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.scheme = parsed_url.scheme

        if self.url_parts:
            self.key_prefix = '%s/' % '/'.join(self.url_parts)
        else:
            self.key_prefix = ''

        self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
        self.parsed_url = parsed_url

        # duplicity and boto.storage_uri() have different URI formats.
        # boto uses scheme://bucket[/name] and specifies hostname on connect()
        self.boto_uri_str = '://'.join((parsed_url.scheme[:2],
                                        parsed_url.path.lstrip('/')))
        self.storage_uri = boto.storage_uri(self.boto_uri_str)
        self.resetConnection()

    def resetConnection(self):
        self.bucket = None
        self.conn = None

        try:
            from boto.s3.connection import S3Connection
            from boto.s3.key import Key
            assert hasattr(S3Connection, 'lookup')

            # Newer versions of boto default to using
            # virtual hosting for buckets as a result of
            # upstream deprecation of the old-style access
            # method by Amazon S3. This change is not
            # backwards compatible (in particular with
            # respect to upper case characters in bucket
            # names); so we default to forcing use of the
            # old-style method unless the user has
            # explicitly asked us to use new-style bucket
            # access.
            #
            # Note that if the user wants to use new-style
            # buckets, we use the subdomain calling form
            # rather than given the option of both
            # subdomain and vhost. The reason being that
            # anything addressable as a vhost, is also
            # addressable as a subdomain. Seeing as the
            # latter is mostly a convenience method of
            # allowing browse:able content semi-invisibly
            # being hosted on S3, the former format makes
            # a lot more sense for us to use - being
            # explicit about what is happening (the fact
            # that we are talking to S3 servers).

            try:
                from boto.s3.connection import OrdinaryCallingFormat
                from boto.s3.connection import SubdomainCallingFormat
                cfs_supported = True
                calling_format = OrdinaryCallingFormat()
            except ImportError:
                cfs_supported = False
                calling_format = None

            if globals.s3_use_new_style:
                if cfs_supported:
                    calling_format = SubdomainCallingFormat()
                else:
                    log.FatalError("Use of new-style (subdomain) S3 bucket addressing was"
                                   "requested, but does not seem to be supported by the "
                                   "boto library. Either you need to upgrade your boto "
                                   "library or duplicity has failed to correctly detect "
                                   "the appropriate support.",
                                   log.ErrorCode.boto_old_style)
            else:
                if cfs_supported:
                    calling_format = OrdinaryCallingFormat()
                else:
                    calling_format = None

        except ImportError:
            log.FatalError("This backend (s3) requires boto library, version %s or later, "
                           "(http://code.google.com/p/boto/)." % BOTO_MIN_VERSION,
                           log.ErrorCode.boto_lib_too_old)

        if not self.parsed_url.hostname:
            # Use the default host.
            self.conn = self.storage_uri.connect(
                is_secure=(not globals.s3_unencrypted_connection))
        else:
            assert self.scheme == 's3'
            self.conn = self.storage_uri.connect(
                host=self.parsed_url.hostname,
                is_secure=(not globals.s3_unencrypted_connection))

        if hasattr(self.conn, 'calling_format'):
            if calling_format is None:
                log.FatalError("It seems we previously failed to detect support for calling "
                               "formats in the boto library, yet the support is there. This is "
                               "almost certainly a duplicity bug.",
                               log.ErrorCode.boto_calling_format)
            else:
                self.conn.calling_format = calling_format

        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.bucket = self.conn.lookup(self.bucket_name)

    def put(self, source_path, remote_filename=None):
        from boto.s3.connection import Location
        if globals.s3_european_buckets:
            if not globals.s3_use_new_style:
                log.FatalError("European bucket creation was requested, but not new-style "
                               "bucket addressing (--s3-use-new-style)",
                               log.ErrorCode.s3_bucket_not_style)
        # A network glitch may prevent the first few attempts at creating/looking up a bucket
        for n in range(1, globals.num_retries+1):
            if self.bucket:
                break
            if n > 1:
                time.sleep(30)
            try:
                try:
                    self.bucket = self.conn.get_bucket(self.bucket_name, validate=True)
                except Exception, e:
                    if "NoSuchBucket" in str(e):
                        if globals.s3_european_buckets:
                            self.bucket = self.conn.create_bucket(self.bucket_name,
                                                                  location=Location.EU)
                        else:
                            self.bucket = self.conn.create_bucket(self.bucket_name)
                    else:
                        raise e
            except Exception, e:
                log.Warn("Failed to create bucket (attempt #%d) '%s' failed (reason: %s: %s)"
                         "" % (n, self.bucket_name,
                               e.__class__.__name__,
                               str(e)))
                self.resetConnection()

        if not remote_filename:
            remote_filename = source_path.get_filename()
        key = self.bucket.new_key(self.key_prefix + remote_filename)

        for n in range(1, globals.num_retries+1):
            if n > 1:
                # sleep before retry (new connection, hopefully to a new host, so no need to wait as long)
                time.sleep(10)

            if globals.s3_use_rrs:
                storage_class = 'REDUCED_REDUNDANCY'
            else:
                storage_class = 'STANDARD'
            log.Info("Uploading %s/%s to %s Storage" % (self.straight_url, remote_filename, storage_class))
            try:
                key.set_contents_from_filename(source_path.name, {'Content-Type': 'application/octet-stream',
                                                                  'x-amz-storage-class': storage_class},
                                                cb=progress.report_transfer,
                                                num_cb=(max(2, 8 * globals.volsize / (1024 * 1024)))
                                              ) # Max num of callbacks = 8 times x megabyte

                key.close()
                self.resetConnection()
                return
            except Exception, e:
                log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url,
                               remote_filename,
                               n,
                               e.__class__.__name__,
                               str(e)))
                log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
                self.resetConnection()
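As the BotoBackend docstring above notes, credentials come from ~/.boto, /etc/boto.cfg, or the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables. For a quick experiment the variables can also be set from Python before boto first connects; the values below are placeholders:

    import os

    # Placeholder credentials; boto reads these when the connection is made.
    os.environ['AWS_ACCESS_KEY_ID'] = 'AKIA...EXAMPLE'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'wJalr...EXAMPLE'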
Example 28
                except FatalBackendError, e:
                    # die on fatal errors
                    raise e
                except Exception, e:
                    # retry on anything else
                    log.Warn("Attempt %s failed. %s: %s" %
                             (n, e.__class__.__name__, str(e)))
                    log.Debug("Backtrace of previous error: %s" %
                              exception_traceback())
                    time.sleep(10)  # wait a bit before trying again
            # final trial, die on exception
            self.retry_count = n + 1
            return fn(self, *args)
        except Exception, e:
            log.Debug("Backtrace of previous error: %s" %
                      exception_traceback())
            log.FatalError(
                "Giving up after %s attempts. %s: %s" %
                (self.retry_count, e.__class__.__name__, str(e)),
                log.ErrorCode.backend_error)
        self.retry_count = 0

    return _retry_fatal


class Backend:
    """
    Represents a generic duplicity backend, capable of storing and
    retrieving files.

    Concrete sub-classes are expected to implement:
Example 29
class BotoBackend(duplicity.backend.Backend):
    """
    Backend for Amazon's Simple Storage Service (aka Amazon S3), through
    the use of the boto module (http://code.google.com/p/boto/).

    To make use of this backend you must set aws_access_key_id
    and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
    with your Amazon Web Services key id and secret respectively.
    Alternatively you can export the environment variables
    AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
    """
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        from boto.s3.key import Key
        from boto.s3.multipart import MultiPartUpload

        # This folds the null prefix and all null parts, which means that:
        #  //MyBucket/ and //MyBucket are equivalent.
        #  //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
        self.url_parts = filter(lambda x: x != '', parsed_url.path.split('/'))

        if self.url_parts:
            self.bucket_name = self.url_parts.pop(0)
        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.scheme = parsed_url.scheme

        self.key_class = Key

        if self.url_parts:
            self.key_prefix = '%s/' % '/'.join(self.url_parts)
        else:
            self.key_prefix = ''

        self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
        self.parsed_url = parsed_url
        self.resetConnection()

    def resetConnection(self):
        self.bucket = None
        self.conn = get_connection(self.scheme, self.parsed_url)
        self.bucket = self.conn.lookup(self.bucket_name)

    def put(self, source_path, remote_filename=None):
        from boto.s3.connection import Location
        if globals.s3_european_buckets:
            if not globals.s3_use_new_style:
                log.FatalError(
                    "European bucket creation was requested, but not new-style "
                    "bucket addressing (--s3-use-new-style)",
                    log.ErrorCode.s3_bucket_not_style)
        # A network glitch may prevent the first few attempts at creating/looking up a bucket
        for n in range(1, globals.num_retries + 1):
            if self.bucket:
                break
            if n > 1:
                time.sleep(30)
            try:
                try:
                    self.bucket = self.conn.get_bucket(self.bucket_name,
                                                       validate=True)
                except Exception, e:
                    if "NoSuchBucket" in str(e):
                        if globals.s3_european_buckets:
                            self.bucket = self.conn.create_bucket(
                                self.bucket_name, location=Location.EU)
                        else:
                            self.bucket = self.conn.create_bucket(
                                self.bucket_name)
                    else:
                        raise e
            except Exception, e:
                log.Warn(
                    "Failed to create bucket '%s' (attempt #%d, reason: %s: %s)"
                    "" % (self.bucket_name, n, e.__class__.__name__, str(e)))
                self.resetConnection()

        if not remote_filename:
            remote_filename = source_path.get_filename()
        key = self.key_prefix + remote_filename
        for n in range(1, globals.num_retries + 1):
            if n > 1:
                # sleep before retry (new connection, hopefully to a new host, so no need to wait as long)
                time.sleep(10)

            if globals.s3_use_rrs:
                storage_class = 'REDUCED_REDUNDANCY'
            else:
                storage_class = 'STANDARD'
            log.Info("Uploading %s/%s to %s Storage" %
                     (self.straight_url, remote_filename, storage_class))
            try:
                headers = {
                    'Content-Type': 'application/octet-stream',
                    'x-amz-storage-class': storage_class
                }
                self.upload(source_path.name, key, headers)
                self.resetConnection()
                return
            except Exception, e:
                log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url, remote_filename, n,
                               e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s" %
                          (exception_traceback(), ))
                self.resetConnection()
Example 30
                try:
                    self.retry_count = n
                    return fn(self, *args)
                except FatalBackendError, e:
                    # die on fatal errors
                    raise e
                except Exception, e:
                    # retry on anything else
                    log.Warn(_("Attempt %s failed. %s: %s") % (n, e.__class__.__name__, util.uexc(e)))
                    log.Debug(_("Backtrace of previous error: %s") % exception_traceback())
                    time.sleep(10)  # wait a bit before trying again
            # final trial, die on exception
            self.retry_count = n + 1
            return fn(self, *args)
        except Exception, e:
            log.Debug(_("Backtrace of previous error: %s") % exception_traceback())
            log.FatalError(
                _("Giving up after %s attempts. %s: %s") % (self.retry_count, e.__class__.__name__, util.uexc(e)),
                log.ErrorCode.backend_error,
            )
        self.retry_count = 0

    return _retry_fatal


class Backend:
    """
    Represents a generic duplicity backend, capable of storing and
    retrieving files.

    Concrete sub-classes are expected to implement:
Example 31
                except FatalBackendError, e:
                    # die on fatal errors
                    raise e
                except Exception, e:
                    # retry on anything else
                    log.Warn("Attempt %s failed. %s: %s"
                             % (n, e.__class__.__name__, str(e)))
                    log.Debug("Backtrace of previous error: %s"
                              % exception_traceback())
                    time.sleep(10) # wait a bit before trying again
            # final trial, die on exception
            self.retry_count = n+1
            return fn(self, *args)
        except Exception, e:
            log.Debug("Backtrace of previous error: %s"
                        % exception_traceback())
            log.FatalError("Giving up after %s attempts. %s: %s"
                         % (self.retry_count, e.__class__.__name__, str(e)),
                          log.ErrorCode.backend_error)
        self.retry_count = 0

    return _retry_fatal

class Backend:
    """
    Represents a generic duplicity backend, capable of storing and
    retrieving files.

    Concrete sub-classes are expected to implement:

      - put
Example 32
                    raise e
                except Exception, e:
                    # retry on anything else
                    log.Warn(
                        _("Attempt %s failed. %s: %s") %
                        (n, e.__class__.__name__, util.uexc(e)))
                    log.Debug(
                        _("Backtrace of previous error: %s") %
                        exception_traceback())
                    time.sleep(10)  # wait a bit before trying again
            # final trial, die on exception
            self.retry_count = n + 1
            return fn(self, *args)
        except Exception, e:
            log.Debug(
                _("Backtrace of previous error: %s") % exception_traceback())
            log.FatalError(
                _("Giving up after %s attempts. %s: %s") %
                (self.retry_count, e.__class__.__name__, util.uexc(e)),
                log.ErrorCode.backend_error)
        self.retry_count = 0

    return _retry_fatal


class Backend:
    """
    Represents a generic duplicity backend, capable of storing and
    retrieving files.

    Concrete sub-classes are expected to implement: