Code Example #1
    def connect(self):
        # get the path to the command executable
        path = os.environ.get(u"IDEVSPATH")
        if path is None:
            log.Warn(u"-" * 72)
            log.Warn(
                u"WARNING: No path to 'idevsutil_dedup' has been set. Download module from"
            )
            log.Warn(
                u"   https://www.idrivedownloads.com/downloads/linux/download-options/IDrive_linux_64bit.zip"
            )
            log.Warn(u"or")
            log.Warn(
                u"   https://www.idrivedownloads.com/downloads/linux/download-options/IDrive_linux_32bit.zip"
            )
            log.Warn(
                u"and place anywhere with exe rights. Then creat env var 'IDEVSPATH' with path to file"
            )
            log.Warn(u"-" * 72)
            raise BackendException(
                u"No IDEVSPATH env var set. Should contain folder to idevsutil_dedup"
            )
        self.cmd = os.path.join(path, u"idevsutil_dedup")
        log.Debug(u"IDrive command base: %s" % (self.cmd))

        # get the account-id
        self.idriveid = os.environ.get(u"IDRIVEID")
        if self.idriveid is None:
            log.Warn(u"-" * 72)
            log.Warn(u"WARNING: IDrive logon ID missing")
            log.Warn(
                u"Create an environment variable IDriveID with your IDrive logon ID"
            )
            log.Warn(u"-" * 72)
            raise BackendException(
                u"No IDRIVEID env var set. Should contain IDrive id")
        log.Debug(u"IDrive id: %s" % (self.idriveid))

        # Get the full-path to the account password file
        filepath = os.environ.get(u"IDPWDFILE")
        if filepath is None:
            log.Warn(u"-" * 72)
            log.Warn(u"WARNING: IDrive password file missging")
            log.Warn(u"Please create a file with your IDrive logon password,")
            log.Warn(
                u"Then create an environment variable IDPWDFILE with path/filename of said file"
            )
            log.Warn(u"-" * 72)
            raise BackendException(
                u"No IDPWDFILE env var set. Should contain file with password")
        log.Debug(u"IDrive pwdpath: %s" % (filepath))
        self.auth_switch = u" --password-file={0}".format(filepath)

        # fakeroot set? Create directory and mark for cleanup
        if config.fakeroot is None:
            self.cleanup = False
            self.fakeroot = u''
        else:
            # Make sure fake root is created at root level!
            self.fakeroot = os.path.join(u'/', config.fakeroot)
            try:
                os.mkdir(self.fakeroot)
            except OSError as e:
                self.cleanup = False
                if e.errno == errno.EEXIST:
                    log.Debug(
                        u"Using existing directory {0} as fake-root".format(
                            self.fakeroot))
                else:
                    log.Warn(u"-" * 72)
                    log.Warn(
                        u"WARNING: Creation of FAKEROOT {0} failed; backup will use system temp directory"
                        .format(self.fakeroot))
                    log.Warn(u"This might interfere with incremental backups")
                    log.Warn(u"-" * 72)
                    raise BackendException(
                        u"Creation of the directory {0} failed".format(
                            self.fakeroot))
            else:
                log.Debug(
                    u"Directory {0} created as fake-root (Will clean-up afterwards!)"
                    .format(self.fakeroot))
                self.cleanup = True

        # get the bucket
        self.bucket = os.environ.get(u"IDBUCKET")
        if self.bucket is None:
            log.Warn(u"-" * 72)
            log.Warn(u"WARNING: IDrive backup bucket missing")
            log.Warn(
                u"Create an environment variable IDBUCKET specifying the target bucket"
            )
            log.Warn(u"-" * 72)
            raise BackendException(
                u"No IDBUCKET env var set. Should contain IDrive backup bucket"
            )
        log.Debug(u"IDrive bucket: %s" % (self.bucket))

        # check account / get config status and config type
        el = self.request(
            self.cmd + self.auth_switch +
            u" --validate --user={0}".format(self.idriveid)).find(u'tree')

        if el.attrib[u"message"] != u"SUCCESS":
            raise BackendException(u"Protocol failure - " + el.attrib[u"desc"])
        if el.attrib[u"desc"] != u"VALID ACCOUNT":
            raise BackendException(u"IDrive account invalid")
        if el.attrib[u"configstatus"] != u"SET":
            raise BackendException(u"IDrive account not set")

        # When private encryption is enabled: get the full path to an encryption key file
        if el.attrib[u"configtype"] == u"PRIVATE":
            filepath = os.environ.get(u"IDKEYFILE")
            if filepath is None:
                log.Warn(u"-" * 72)
                log.Warn(u"WARNING: IDrive encryption key file missging")
                log.Warn(
                    u"Please create a file with your IDrive encryption key,")
                log.Warn(
                    u"Then create an environment variable IDKEYFILE with path/filename of said file"
                )
                log.Warn(u"-" * 72)
                raise BackendException(
                    u"No IDKEYFILE env var set. Should contain file with encription key"
                )
            log.Debug(u"IDrive keypath: %s" % (filepath))
            self.auth_switch += u" --pvt-key={0}".format(filepath)

        # get the server address
        el = self.request(
            self.cmd + self.auth_switch +
            u" --getServerAddress {0}".format(self.idriveid)).find(u'tree')
        self.idriveserver = el.attrib[u"cmdUtilityServer"]

        # get the device list - primarily used to get the device-id string
        el = self.request(self.cmd + self.auth_switch +
                          u" --list-device {0}@{1}::home".format(
                              self.idriveid, self.idriveserver))
        # scan all returned devices for requested device (== bucket)
        self.idrivedevid = None
        for item in el.findall(u'item'):
            if item.attrib[u'nick_name'] == self.bucket:
                # prefix and suffix reverse-engineered from Common.pl!
                self.idrivedevid = u"5c0b" + item.attrib[u"device_id"] + u"4b5z"
        if self.idrivedevid is None:
            el = self.request(
                self.cmd + self.auth_switch +
                u" --create-bucket --bucket-type=D --nick-name={0} --os=Linux --uid=987654321 {1}@{2}::home/"
                .format(self.bucket, self.idriveid, self.idriveserver)).find(
                    u'item')
            # prefix and suffix reverse-engineered from Common.pl!
            self.idrivedevid = u"5c0b" + el.attrib[u"device_id"] + u"4b5z"

        # We're fully connected!
        self.connected = True
        log.Debug(u"User fully connected")
Code Example #2
File: _boto_multi.py Project: mrrobot47/duplicity
    def upload(self, filename, key, headers=None):
        import boto  # pylint: disable=import-error

        chunk_size = config.s3_multipart_chunk_size

        # Check minimum chunk size for S3
        if chunk_size < config.s3_multipart_minimum_chunk_size:
            log.Warn(u"Minimum chunk size is %d, but %d specified." %
                     (config.s3_multipart_minimum_chunk_size, chunk_size))
            chunk_size = config.s3_multipart_minimum_chunk_size

        # Decide in how many chunks to upload
        bytes = os.path.getsize(filename)  # pylint: disable=redefined-builtin
        if bytes < chunk_size:
            chunks = 1
        else:
            chunks = bytes // chunk_size
            if (bytes % chunk_size):
                chunks += 1

        log.Debug(u"Uploading %d bytes in %d chunks" % (bytes, chunks))

        mp = self.bucket.initiate_multipart_upload(
            key.key, headers, encrypt_key=config.s3_use_sse)

        # Initiate a queue to share progress data between the pool
        # workers and a consumer thread, that will collect and report
        queue = None
        if config.progress:
            manager = multiprocessing.Manager()
            queue = manager.Queue()
            consumer = ConsumerThread(queue, bytes)
            consumer.start()
        tasks = []
        for n in range(chunks):
            storage_uri = boto.storage_uri(self.boto_uri_str)
            params = [
                self.scheme, self.parsed_url, storage_uri, self.bucket_name,
                mp.id, filename, n, chunk_size, config.num_retries, queue
            ]
            tasks.append(
                self._pool.apply_async(multipart_upload_worker, params))

        log.Debug(u"Waiting for the pool to finish processing %s tasks" %
                  len(tasks))
        while tasks:
            try:
                tasks[0].wait(timeout=config.s3_multipart_max_timeout)
                if tasks[0].ready():
                    if tasks[0].successful():
                        del tasks[0]
                    else:
                        log.Debug(
                            u"Part upload not successful, aborting multipart upload."
                        )
                        self._setup_pool()
                        break
                else:
                    raise multiprocessing.TimeoutError
            except multiprocessing.TimeoutError:
                log.Debug(u"%s tasks did not finish by the specified timeout,"
                          u"aborting multipart upload and resetting pool." %
                          len(tasks))
                self._setup_pool()
                break

        log.Debug(u"Done waiting for the pool to finish processing")

        # Terminate the consumer thread, if any
        if config.progress:
            consumer.finish = True
            consumer.join()

        if len(tasks) > 0 or len(mp.get_all_parts()) < chunks:
            mp.cancel_upload()
            raise BackendException(u"Multipart upload failed. Aborted.")

        return mp.complete_upload()
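
The chunk count computed in upload() is a ceiling division; a standalone restatement with a few sanity checks:

def chunk_count(nbytes, chunk_size):
    # ceil(nbytes / chunk_size), minimum 1, without floating point
    if nbytes < chunk_size:
        return 1
    return nbytes // chunk_size + (1 if nbytes % chunk_size else 0)

assert chunk_count(10, 25) == 1   # smaller than one chunk -> single upload
assert chunk_count(50, 25) == 2   # exact multiple
assert chunk_count(51, 25) == 3   # remainder adds one more chunk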
Code Example #3
class BotoBackend(duplicity.backend.Backend):
    """
    Backend for Amazon's Simple Storage System, (aka Amazon S3), through
    the use of the boto module, (http://code.google.com/p/boto/).

    To make use of this backend you must set aws_access_key_id
    and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
    with your Amazon Web Services key id and secret respectively.
    Alternatively you can export the environment variables
    AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
    """

    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        import boto
        assert boto.Version >= BOTO_MIN_VERSION

        from boto.s3.key import Key

        # This folds the null prefix and all null parts, which means that:
        #  //MyBucket/ and //MyBucket are equivalent.
        #  //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
        self.url_parts = filter(lambda x: x != '', parsed_url.path.split('/'))

        if self.url_parts:
            self.bucket_name = self.url_parts.pop(0)
        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.scheme = parsed_url.scheme

        self.key_class = Key

        if self.url_parts:
            self.key_prefix = '%s/' % '/'.join(self.url_parts)
        else:
            self.key_prefix = ''

        self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
        self.parsed_url = parsed_url
        self.resetConnection()

    def resetConnection(self):
        self.bucket = None
        self.conn = None

        try:
            from boto.s3.connection import S3Connection
            from boto.s3.key import Key
            assert hasattr(S3Connection, 'lookup')

            # Newer versions of boto default to using
            # virtual hosting for buckets as a result of
            # upstream deprecation of the old-style access
            # method by Amazon S3. This change is not
            # backwards compatible (in particular with
            # respect to upper case characters in bucket
            # names); so we default to forcing use of the
            # old-style method unless the user has
            # explicitly asked us to use new-style bucket
            # access.
            #
            # Note that if the user wants to use new-style
            # buckets, we use the subdomain calling form
            # rather than given the option of both
            # subdomain and vhost. The reason being that
            # anything addressable as a vhost, is also
            # addressable as a subdomain. Seeing as the
            # latter is mostly a convenience method of
            # allowing browse:able content semi-invisibly
            # being hosted on S3, the former format makes
            # a lot more sense for us to use - being
            # explicit about what is happening (the fact
            # that we are talking to S3 servers).

            try:
                from boto.s3.connection import OrdinaryCallingFormat
                from boto.s3.connection import SubdomainCallingFormat
                cfs_supported = True
                calling_format = OrdinaryCallingFormat()
            except ImportError:
                cfs_supported = False
                calling_format = None

            if globals.s3_use_new_style:
                if cfs_supported:
                    calling_format = SubdomainCallingFormat()
                else:
                    log.FatalError("Use of new-style (subdomain) S3 bucket addressing was"
                                   "requested, but does not seem to be supported by the "
                                   "boto library. Either you need to upgrade your boto "
                                   "library or duplicity has failed to correctly detect "
                                   "the appropriate support.",
                                   log.ErrorCode.boto_old_style)
            else:
                if cfs_supported:
                    calling_format = OrdinaryCallingFormat()
                else:
                    calling_format = None

        except ImportError:
            log.FatalError("This backend (s3) requires boto library, version %s or later, "
                           "(http://code.google.com/p/boto/)." % BOTO_MIN_VERSION,
                           log.ErrorCode.boto_lib_too_old)

        if self.scheme == 's3+http':
            # Use the default Amazon S3 host.
            self.conn = S3Connection(is_secure=(not globals.s3_unencrypted_connection))
        else:
            assert self.scheme == 's3'
            self.conn = S3Connection(
                host=self.parsed_url.hostname,
                is_secure=(not globals.s3_unencrypted_connection))

        if hasattr(self.conn, 'calling_format'):
            if calling_format is None:
                log.FatalError("It seems we previously failed to detect support for calling "
                               "formats in the boto library, yet the support is there. This is "
                               "almost certainly a duplicity bug.",
                               log.ErrorCode.boto_calling_format)
            else:
                self.conn.calling_format = calling_format

        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.bucket = self.conn.lookup(self.bucket_name)

    def put(self, source_path, remote_filename=None):
        from boto.s3.connection import Location
        if globals.s3_european_buckets:
            if not globals.s3_use_new_style:
                log.FatalError("European bucket creation was requested, but not new-style "
                               "bucket addressing (--s3-use-new-style)",
                               log.ErrorCode.s3_bucket_not_style)
        # Network glitch may prevent first few attempts of creating/looking up a bucket
        for n in range(1, globals.num_retries+1):
            if self.bucket:
                break
            if n > 1:
                time.sleep(30)
            try:
                try:
                    self.bucket = self.conn.get_bucket(self.bucket_name, validate=True)
                except Exception, e:
                    if "NoSuchBucket" in str(e):
                        if globals.s3_european_buckets:
                            self.bucket = self.conn.create_bucket(self.bucket_name,
                                                                  location=Location.EU)
                        else:
                            self.bucket = self.conn.create_bucket(self.bucket_name)
                    else:
                        raise e
            except Exception, e:
                log.Warn("Failed to create bucket (attempt #%d) '%s' failed (reason: %s: %s)"
                         "" % (n, self.bucket_name,
                               e.__class__.__name__,
                               str(e)))
                self.resetConnection()

        if not remote_filename:
            remote_filename = source_path.get_filename()
        key = self.key_class(self.bucket)
        key.key = self.key_prefix + remote_filename
        for n in range(1, globals.num_retries+1):
            if n > 1:
                # sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
                time.sleep(10)

            if globals.s3_use_rrs:
                storage_class = 'REDUCED_REDUNDANCY'
            else:
                storage_class = 'STANDARD'
            log.Info("Uploading %s/%s to %s Storage" % (self.straight_url, remote_filename, storage_class))
            try:
                key.set_contents_from_filename(source_path.name, {'Content-Type': 'application/octet-stream',
                                                                  'x-amz-storage-class': storage_class})
                key.close()
                self.resetConnection()
                return
            except Exception, e:
                log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url,
                               remote_filename,
                               n,
                               e.__class__.__name__,
                               str(e)))
                log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
                self.resetConnection()
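
A sketch of the addressing distinction the long comment in resetConnection() describes, assuming boto 2.x is installed and credentials are configured as the docstring above says (the URLs are illustrative):

from boto.s3.connection import S3Connection, OrdinaryCallingFormat

# OrdinaryCallingFormat  -> https://s3.amazonaws.com/MyBucket/key   (old style, path)
# SubdomainCallingFormat -> https://mybucket.s3.amazonaws.com/key   (new style, vhost)
conn = S3Connection(calling_format=OrdinaryCallingFormat())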
Code Example #4
def parse_cmdline_options(arglist):
    """Parse argument list"""
    global select_opts, select_files, full_backup
    global list_current, collection_status, cleanup, remove_time, verify

    def use_gio(*args):
        try:
            import duplicity.backends.giobackend
            backend.force_backend(duplicity.backends.giobackend.GIOBackend)
        except ImportError:
            log.FatalError(_("Unable to load gio module"),
                           log.ErrorCode.gio_not_available)

    def set_log_fd(fd):
        if fd < 1:
            raise optparse.OptionValueError(
                "log-fd must be greater than zero.")
        log.add_fd(fd)

    def set_time_sep(sep, opt):
        if sep == '-':
            raise optparse.OptionValueError(
                "Dash ('-') not valid for time-separator.")
        globals.time_separator = sep
        old_fn_deprecation(opt)

    def add_selection(o, s, v, p):
        select_opts.append((s, v))

    def add_filelist(o, s, v, p):
        filename = v
        select_opts.append((s, filename))
        try:
            select_files.append(open(filename, "r"))
        except IOError:
            log.FatalError(
                _("Error opening file %s") % filename,
                log.ErrorCode.cant_open_filelist)

    def print_ver(o, s, v, p):
        print "duplicity %s" % (globals.version)
        sys.exit(0)

    def add_rename(o, s, v, p):
        globals.rename[os.path.normcase(os.path.normpath(v[0]))] = v[1]

    parser = OPHelpFix(option_class=DupOption, usage=usage())

    # If this is true, only warn and don't raise fatal error when backup
    # source directory doesn't match previous backup source directory.
    parser.add_option("--allow-source-mismatch", action="store_true")

    # Set to the path of the archive directory (the directory which
    # contains the signatures and manifests of the relevant backup
    # collection), and for checkpoint state between volumes.
    # TRANSL: Used in usage help to represent a Unix-style path name. Example:
    # --archive-dir <path>
    parser.add_option("--archive-dir", type="file", metavar=_("path"))

    # Asynchronous put/get concurrency limit
    # (default of 0 disables asynchronicity).
    parser.add_option("--asynchronous-upload",
                      action="store_const",
                      const=1,
                      dest="async_concurrency")

    # config dir for future use
    parser.add_option("--config-dir",
                      type="file",
                      metavar=_("path"),
                      help=optparse.SUPPRESS_HELP)

    # for testing -- set current time
    parser.add_option("--current-time",
                      type="int",
                      dest="current_time",
                      help=optparse.SUPPRESS_HELP)

    # Don't actually do anything, but still report what would be done
    parser.add_option("--dry-run", action="store_true")

    # TRANSL: Used in usage help to represent an ID for a GnuPG key. Example:
    # --encrypt-key <gpg_key_id>
    parser.add_option("--encrypt-key",
                      type="string",
                      metavar=_("gpg-key-id"),
                      dest="",
                      action="callback",
                      callback=lambda o, s, v, p: globals.gpg_profile.
                      recipients.append(v))  #@UndefinedVariable

    # secret keyring in which the private encrypt key can be found
    parser.add_option("--encrypt-secret-keyring",
                      type="string",
                      metavar=_("path"))

    parser.add_option(
        "--encrypt-sign-key",
        type="string",
        metavar=_("gpg-key-id"),
        dest="",
        action="callback",
        callback=lambda o, s, v, p:
        (globals.gpg_profile.recipients.append(v), set_sign_key(v)))

    # TRANSL: Used in usage help to represent a "glob" style pattern for
    # matching one or more files, as described in the documentation.
    # Example:
    # --exclude <shell_pattern>
    parser.add_option("--exclude",
                      action="callback",
                      metavar=_("shell_pattern"),
                      dest="",
                      type="string",
                      callback=add_selection)

    parser.add_option("--exclude-device-files",
                      action="callback",
                      dest="",
                      callback=add_selection)

    parser.add_option("--exclude-filelist",
                      type="file",
                      metavar=_("filename"),
                      dest="",
                      action="callback",
                      callback=add_filelist)

    parser.add_option("--exclude-filelist-stdin",
                      action="callback",
                      dest="",
                      callback=lambda o, s, v, p:
                      (select_opts.append(
                          ("--exclude-filelist", "standard input")),
                       select_files.append(sys.stdin)))

    parser.add_option("--exclude-globbing-filelist",
                      type="file",
                      metavar=_("filename"),
                      dest="",
                      action="callback",
                      callback=add_filelist)

    # TRANSL: Used in usage help to represent the name of a file. Example:
    # --log-file <filename>
    parser.add_option("--exclude-if-present",
                      metavar=_("filename"),
                      dest="",
                      type="file",
                      action="callback",
                      callback=add_selection)

    parser.add_option("--exclude-other-filesystems",
                      action="callback",
                      dest="",
                      callback=add_selection)

    # TRANSL: Used in usage help to represent a regular expression (regexp).
    parser.add_option("--exclude-regexp",
                      metavar=_("regular_expression"),
                      dest="",
                      type="string",
                      action="callback",
                      callback=add_selection)

    # Whether we should be particularly aggressive when cleaning up
    parser.add_option("--extra-clean", action="store_true")

    # used in testing only - raises exception after volume
    parser.add_option("--fail-on-volume",
                      type="int",
                      help=optparse.SUPPRESS_HELP)

    # used to provide a prefix on top of the default tar file name
    parser.add_option("--file-prefix",
                      type="string",
                      dest="file_prefix",
                      action="store")

    # used in testing only - skips upload for a given volume
    parser.add_option("--skip-volume", type="int", help=optparse.SUPPRESS_HELP)

    # If set, restore only the subdirectory or file specified, not the
    # whole root.
    # TRANSL: Used in usage help to represent a Unix-style path name. Example:
    # --archive-dir <path>
    parser.add_option("--file-to-restore",
                      "-r",
                      action="callback",
                      type="file",
                      metavar=_("path"),
                      dest="restore_dir",
                      callback=lambda o, s, v, p: setattr(
                          p.values, "restore_dir", v.rstrip('/')))

    # Used to confirm certain destructive operations like deleting old files.
    parser.add_option("--force", action="store_true")

    # FTP data connection type
    parser.add_option("--ftp-passive",
                      action="store_const",
                      const="passive",
                      dest="ftp_connection")
    parser.add_option("--ftp-regular",
                      action="store_const",
                      const="regular",
                      dest="ftp_connection")

    # If set, forces a full backup if the last full backup is older than
    # the time specified
    parser.add_option("--full-if-older-than",
                      type="time",
                      dest="full_force_time",
                      metavar=_("time"))

    parser.add_option("--gio", action="callback", callback=use_gio)

    parser.add_option("--gpg-options", action="extend", metavar=_("options"))

    # ignore (some) errors during operations; supposed to make it more
    # likely that you are able to restore data under problematic
    # circumstances. the default should absolutely always be False unless
    # you know what you are doing.
    parser.add_option(
        "--ignore-errors",
        action="callback",
        dest="ignore_errors",
        callback=lambda o, s, v, p: (log.Warn(
            _("Running in 'ignore errors' mode due to %s; please "
              "re-consider if this was not intended") % s),
                                     setattr(p.values, "ignore errors", True)))

    # Whether to use the full email address as the user name when
    # logging into an imap server. If false just the user name
    # part of the email address is used.
    parser.add_option("--imap-full-address",
                      action="store_true",
                      help=optparse.SUPPRESS_HELP)

    # Name of the imap folder where we want to store backups.
    # Can be changed with a command line argument.
    # TRANSL: Used in usage help to represent an imap mailbox
    parser.add_option("--imap-mailbox", metavar=_("imap_mailbox"))

    parser.add_option("--include",
                      action="callback",
                      metavar=_("shell_pattern"),
                      dest="",
                      type="string",
                      callback=add_selection)
    parser.add_option("--include-filelist",
                      type="file",
                      metavar=_("filename"),
                      dest="",
                      action="callback",
                      callback=add_filelist)
    parser.add_option("--include-filelist-stdin",
                      action="callback",
                      dest="",
                      callback=lambda o, s, v, p:
                      (select_opts.append(
                          ("--include-filelist", "standard input")),
                       select_files.append(sys.stdin)))
    parser.add_option("--include-globbing-filelist",
                      type="file",
                      metavar=_("filename"),
                      dest="",
                      action="callback",
                      callback=add_filelist)
    parser.add_option("--include-regexp",
                      metavar=_("regular_expression"),
                      dest="",
                      type="string",
                      action="callback",
                      callback=add_selection)

    parser.add_option("--log-fd",
                      type="int",
                      metavar=_("file_descriptor"),
                      dest="",
                      action="callback",
                      callback=lambda o, s, v, p: set_log_fd(v))

    # TRANSL: Used in usage help to represent the name of a file. Example:
    # --log-file <filename>
    parser.add_option("--log-file",
                      type="file",
                      metavar=_("filename"),
                      dest="",
                      action="callback",
                      callback=lambda o, s, v, p: log.add_file(v))

    # TRANSL: Used in usage help (noun)
    parser.add_option("--name", dest="backup_name", metavar=_("backup name"))

    # If set to false, then do not encrypt files on remote system
    parser.add_option("--no-encryption",
                      action="store_false",
                      dest="encryption")

    # If set to false, then do not compress files on remote system
    parser.add_option("--no-compression",
                      action="store_false",
                      dest="compression")

    # If set, print the statistics after every backup session
    parser.add_option("--no-print-statistics",
                      action="store_false",
                      dest="print_statistics")

    # If true, filelists and directory statistics will be split on
    # nulls instead of newlines.
    parser.add_option("--null-separator", action="store_true")

    # number of retries on network operations
    # TRANSL: Used in usage help to represent a desired number of
    # something. Example:
    # --num-retries <number>
    parser.add_option("--num-retries", type="int", metavar=_("number"))

    # File owner uid keeps number from tar file. Like same option in GNU tar.
    parser.add_option("--numeric-owner", action="store_true")

    # Whether the old filename format is in effect.
    parser.add_option("--old-filenames",
                      action="callback",
                      dest="old_filenames",
                      callback=lambda o, s, v, p:
                      (setattr(p.values, o.dest, True), old_fn_deprecation(s)))

    # option to trigger Pydev debugger
    parser.add_option("--pydevd", action="store_true")

    # option to rename files during restore
    parser.add_option("--rename",
                      type="file",
                      action="callback",
                      nargs=2,
                      callback=add_rename)

    # Restores will try to bring back the state as of the following time.
    # If it is None, default to current time.
    # TRANSL: Used in usage help to represent a time spec for a previous
    # point in time, as described in the documentation. Example:
    # duplicity remove-older-than time [options] target_url
    parser.add_option("--restore-time",
                      "--time",
                      "-t",
                      type="time",
                      metavar=_("time"))

    # user added rsync options
    parser.add_option("--rsync-options", action="extend", metavar=_("options"))

    # Whether to create European buckets (sorry, hard-coded to only
    # support European for now).
    parser.add_option("--s3-european-buckets", action="store_true")

    # Whether to use S3 Reduced Redundancy Storage
    parser.add_option("--s3-use-rrs", action="store_true")

    # Whether to use "new-style" subdomain addressing for S3 buckets. Such
    # use is not backwards-compatible with upper-case buckets, or buckets
    # that are otherwise not expressible in a valid hostname.
    parser.add_option("--s3-use-new-style", action="store_true")

    # Whether to use plain HTTP (without SSL) to send data to S3
    # See <https://bugs.launchpad.net/duplicity/+bug/433970>.
    parser.add_option("--s3-unencrypted-connection", action="store_true")

    # Chunk size used for S3 multipart uploads. The number of parallel uploads
    # to S3 is given by volume size / chunk size. Use this to maximize the use
    # of your bandwidth. Defaults to 25MB.
    parser.add_option(
        "--s3-multipart-chunk-size",
        type="int",
        action="callback",
        metavar=_("number"),
        callback=lambda o, s, v, p: setattr(
            p.values, "s3_multipart_chunk_size", v * 1024 * 1024))

    # Option to allow the s3/boto backend use the multiprocessing version.
    # By default it is off since it does not work for Python 2.4 or 2.5.
    if sys.version_info[:2] >= (2, 6):
        parser.add_option("--s3-use-multiprocessing", action="store_true")

    # scp command to use
    # TRANSL: noun
    parser.add_option("--scp-command",
                      nargs=1,
                      type="string",
                      action="callback",
                      callback=scp_deprecation)

    # sftp command to use
    # TRANSL: noun
    parser.add_option("--sftp-command",
                      nargs=1,
                      type="string",
                      action="callback",
                      callback=scp_deprecation)

    # If set, use short (< 30 char) filenames for all the remote files.
    parser.add_option("--short-filenames",
                      action="callback",
                      dest="short_filenames",
                      callback=lambda o, s, v, p:
                      (setattr(p.values, o.dest, True), old_fn_deprecation(s)))

    # TRANSL: Used in usage help to represent an ID for a GnuPG key. Example:
    # --encrypt-key <gpg_key_id>
    parser.add_option("--sign-key",
                      type="string",
                      metavar=_("gpg-key-id"),
                      dest="",
                      action="callback",
                      callback=lambda o, s, v, p: set_sign_key(v))

    # default to batch mode using public-key encryption
    parser.add_option("--ssh-askpass", action="store_true")

    # user added ssh options
    parser.add_option("--ssh-options", action="extend", metavar=_("options"))

    # Working directory for the tempfile module. Defaults to /tmp on most systems.
    parser.add_option("--tempdir",
                      dest="temproot",
                      type="file",
                      metavar=_("path"))

    # network timeout value
    # TRANSL: Used in usage help. Example:
    # --timeout <seconds>
    parser.add_option("--timeout", type="int", metavar=_("seconds"))

    # Character used like the ":" in time strings like
    # 2002-08-06T04:22:00-07:00.  The colon isn't good for filenames on
    # Windows machines.
    # TRANSL: abbreviation for "character" (noun)
    parser.add_option("--time-separator",
                      type="string",
                      metavar=_("char"),
                      action="callback",
                      callback=lambda o, s, v, p: set_time_sep(v, s))

    # Whether to specify --use-agent in GnuPG options
    parser.add_option("--use-agent", action="store_true")

    parser.add_option("--use-scp", action="store_true")

    parser.add_option("--verbosity",
                      "-v",
                      type="verbosity",
                      metavar="[0-9]",
                      dest="",
                      action="callback",
                      callback=lambda o, s, v, p: log.setverbosity(v))

    parser.add_option("-V", "--version", action="callback", callback=print_ver)

    # volume size
    # TRANSL: Used in usage help to represent a desired number of
    # something. Example:
    # --num-retries <number>
    parser.add_option("--volsize",
                      type="int",
                      action="callback",
                      metavar=_("number"),
                      callback=lambda o, s, v, p: setattr(
                          p.values, "volsize", v * 1024 * 1024))

    # parse the options
    (options, args) = parser.parse_args()

    # Copy all arguments and their values to the globals module.  Don't copy
    # attributes that are 'hidden' (start with an underscore) or whose name is
    # the empty string (used for arguments that don't directly store a value
    # by using dest="")
    for f in filter(lambda x: x and not x.startswith("_"), dir(options)):
        v = getattr(options, f)
        # Only set if v is not None because None is the default for all the
        # variables.  If user didn't set it, we'll use defaults in globals.py
        if v is not None:
            setattr(globals, f, v)

    socket.setdefaulttimeout(globals.timeout)

    # expect no cmd and two positional args
    cmd = ""
    num_expect = 2

    # process first arg as command
    if args:
        cmd = args.pop(0)
        possible = [c for c in commands if c.startswith(cmd)]
        # no unique match, that's an error
        if len(possible) > 1:
            command_line_error("command '%s' not unique, could be %s" %
                               (cmd, possible))
        # only one match, that's a keeper
        elif len(possible) == 1:
            cmd = possible[0]
        # no matches, assume no cmd
        elif not possible:
            args.insert(0, cmd)

    if cmd == "cleanup":
        cleanup = True
        num_expect = 1
    elif cmd == "collection-status":
        collection_status = True
        num_expect = 1
    elif cmd == "full":
        full_backup = True
        num_expect = 2
    elif cmd == "incremental":
        globals.incremental = True
        num_expect = 2
    elif cmd == "list-current-files":
        list_current = True
        num_expect = 1
    elif cmd == "remove-older-than":
        try:
            arg = args.pop(0)
        except Exception:
            command_line_error("Missing time string for remove-older-than")
        globals.remove_time = dup_time.genstrtotime(arg)
        num_expect = 1
    elif cmd == "remove-all-but-n-full" or cmd == "remove-all-inc-of-but-n-full":
        if cmd == "remove-all-but-n-full":
            globals.remove_all_but_n_full_mode = True
        if cmd == "remove-all-inc-of-but-n-full":
            globals.remove_all_inc_of_but_n_full_mode = True
        try:
            arg = args.pop(0)
        except Exception:
            command_line_error("Missing count for " + cmd)
        globals.keep_chains = int(arg)
        if not globals.keep_chains > 0:
            command_line_error(cmd + " count must be > 0")
        num_expect = 1
    elif cmd == "verify":
        verify = True

    if len(args) != num_expect:
        command_line_error("Expected %d args, got %d" %
                           (num_expect, len(args)))

    # expand pathname args, but not URL
    for loc in range(len(args)):
        if '://' not in args[loc]:
            args[loc] = expand_fn(args[loc])

    # Note that ProcessCommandLine depends on us verifying the arg
    # count here; do not remove without fixing it. We must make the
    # checks here in order to make enough sense of args to identify
    # the backend URL/lpath for args_to_path_backend().
    if len(args) < 1:
        command_line_error("Too few arguments")
    elif len(args) == 1:
        backend_url = args[0]
    elif len(args) == 2:
        lpath, backend_url = args_to_path_backend(args[0],
                                                  args[1])  #@UnusedVariable
    else:
        command_line_error("Too many arguments")

    if globals.backup_name is None:
        globals.backup_name = generate_default_backup_name(backend_url)

    # set and expand archive dir
    set_archive_dir(
        expand_archive_dir(globals.archive_dir, globals.backup_name))

    log.Debug(_("Using archive dir: %s") % (globals.archive_dir.name, ))
    log.Debug(_("Using backup name: %s") % (globals.backup_name, ))

    return args
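
The dest=""/callback combination used throughout parse_cmdline_options() keeps an option out of the final copy-to-globals loop (empty attribute names are filtered there) while the callback records the value itself; a self-contained sketch of the pattern:

import optparse

select_opts = []
parser = optparse.OptionParser()
parser.add_option("--exclude", type="string", dest="",
                  action="callback",
                  callback=lambda o, s, v, p: select_opts.append((s, v)))
options, args = parser.parse_args(["--exclude", "*.tmp"])
print(select_opts)  # [('--exclude', '*.tmp')]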
Code Example #5
    def get_tarinfo(self):
        u"""Generate a tarfile.TarInfo object based on self

        Doesn't set size based on stat, because we may want to replace
        the data with another stream.  Size should be set separately by
        the calling function.

        """
        ti = tarfile.TarInfo()
        if self.index:
            ti.name = util.fsdecode(b"/".join(self.index))
        else:
            ti.name = u"."
        if self.isdir():
            ti.name += u"/"  # tar dir naming convention

        ti.size = 0
        if self.type:
            # Lots of this is specific to tarfile.py, hope it doesn't
            # change much...
            if self.isreg():
                ti.type = tarfile.REGTYPE
                ti.size = self.stat.st_size
            elif self.isdir():
                ti.type = tarfile.DIRTYPE
            elif self.isfifo():
                ti.type = tarfile.FIFOTYPE
            elif self.issym():
                ti.type = tarfile.SYMTYPE
                ti.linkname = self.symtext
                if isinstance(ti.linkname, bytes):
                    ti.linkname = util.fsdecode(ti.linkname)
            elif self.isdev():
                if self.type == u"chr":
                    ti.type = tarfile.CHRTYPE
                else:
                    ti.type = tarfile.BLKTYPE
                ti.devmajor, ti.devminor = self.devnums
            else:
                raise PathException(u"Unrecognized type " + str(self.type))

            ti.mode = self.mode
            ti.uid, ti.gid = self.stat.st_uid, self.stat.st_gid
            if self.stat.st_mtime < 0:
                log.Warn(
                    _(u"Warning: %s has negative mtime, treating as 0.") %
                    (util.fsdecode(self.get_relative_path())))
                ti.mtime = 0
            else:
                ti.mtime = int(self.stat.st_mtime)

            try:
                ti.uname = cached_ops.getpwuid(ti.uid)[0]
            except KeyError:
                ti.uname = u''
            try:
                ti.gname = cached_ops.getgrgid(ti.gid)[0]
            except KeyError:
                ti.gname = u''

            if ti.type in (tarfile.CHRTYPE, tarfile.BLKTYPE):
                if hasattr(os, u"major") and hasattr(os, u"minor"):
                    ti.devmajor, ti.devminor = self.devnums
        else:
            # Currently we depend on an uninitialized tarinfo file to
            # already have appropriate headers.  Still, might as well
            # make sure mode and size are set.
            ti.mode, ti.size = 0, 0
        return ti
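
A hedged usage sketch of the contract in the docstring above: the caller sets ti.size itself before handing the tarinfo and a replacement data stream to tarfile:

import io
import tarfile

data = b"replacement stream contents"
ti = tarfile.TarInfo(name="member.txt")
ti.size = len(data)  # size must be set by the calling function
with tarfile.open("example.tar", "w") as tf:
    tf.addfile(ti, io.BytesIO(data))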
Code Example #6
    def run_scp_command(self, commandline):
        u""" Run an scp command, responding to password prompts """
        log.Info(u"Running '%s'" % commandline)
        child = pexpect.spawn(commandline, timeout=None)
        if globals.ssh_askpass:
            state = u"authorizing"
        else:
            state = u"copying"
        while 1:
            if state == u"authorizing":
                match = child.expect([
                    pexpect.EOF, u"(?i)timeout, server not responding",
                    u"(?i)pass(word|phrase .*):", u"(?i)permission denied",
                    u"authenticity"
                ])
                log.Debug(u"State = %s, Before = '%s'" %
                          (state, child.before.strip()))
                if match == 0:
                    log.Warn(u"Failed to authenticate")
                    break
                elif match == 1:
                    log.Warn(u"Timeout waiting to authenticate")
                    break
                elif match == 2:
                    child.sendline(self.password)
                    state = u"copying"
                elif match == 3:
                    log.Warn(u"Invalid SSH password")
                    break
                elif match == 4:
                    log.Warn(
                        u"Remote host authentication failed (missing known_hosts entry?)"
                    )
                    break
            elif state == u"copying":
                match = child.expect([
                    pexpect.EOF, u"(?i)timeout, server not responding",
                    u"stalled", u"authenticity", u"ETA"
                ])
                log.Debug(u"State = %s, Before = '%s'" %
                          (state, child.before.strip()))
                if match == 0:
                    break
                elif match == 1:
                    log.Warn(u"Timeout waiting for response")
                    break
                elif match == 2:
                    state = u"stalled"
                elif match == 3:
                    log.Warn(
                        u"Remote host authentication failed (missing known_hosts entry?)"
                    )
                    break
            elif state == u"stalled":
                match = child.expect([
                    pexpect.EOF, u"(?i)timeout, server not responding", u"ETA"
                ])
                log.Debug(u"State = %s, Before = '%s'" %
                          (state, child.before.strip()))
                if match == 0:
                    break
                elif match == 1:
                    log.Warn(u"Stalled for too long, aborted copy")
                    break
                elif match == 2:
                    state = u"copying"
        child.close(force=True)
        if child.exitstatus != 0:
            raise BackendException(u"Error running '%s'" % commandline)
Code Example #7
File: sshbackend.py Project: jiaopengju/duplicity
    def run_sftp_command(self, commandline, commands):
        """ Run an sftp command, responding to password prompts, passing commands from list """
        maxread = 2000  # expected read buffer size
        responses = [
            pexpect.EOF, "(?i)timeout, server not responding", "sftp>",
            "(?i)pass(word|phrase .*):", "(?i)permission denied",
            "authenticity", "(?i)no such file or directory",
            "Couldn't delete file: No such file or directory",
            "Couldn't delete file", "open(.*): Failure"
        ]
        max_response_len = max([len(p) for p in responses[1:]])
        for n in range(1, globals.num_retries + 1):
            if n > 1:
                # sleep before retry
                time.sleep(30)
            log.Info("Running '%s' (attempt #%d)" % (commandline, n))
            child = pexpect.spawn(commandline, timeout=None, maxread=maxread)
            cmdloc = 0
            while 1:
                match = child.expect(responses,
                                     searchwindowsize=maxread +
                                     max_response_len)
                log.Debug("State = sftp, Before = '%s'" %
                          (child.before.strip()))
                if match == 0:
                    break
                elif match == 1:
                    log.Info("Timeout waiting for response")
                    break
                elif match == 2:
                    if cmdloc < len(commands):
                        command = commands[cmdloc]
                        log.Info("sftp command: '%s'" % (command, ))
                        child.sendline(command)
                        cmdloc += 1
                    else:
                        command = 'quit'
                        child.sendline(command)
                        res = child.before
                elif match == 3:
                    child.sendline(self.password)
                elif match == 4:
                    log.Warn("Invalid SSH password")
                    break
                elif match == 5:
                    log.Warn(
                        "Host key authenticity could not be verified (missing known_hosts entry?)"
                    )
                    break
                elif match == 6:
                    if not child.before.strip().startswith("rm"):
                        log.Warn(
                            "Remote file or directory does not exist in command='%s'"
                            % (commandline, ))
                        break
                elif match == 7:
                    if not child.before.strip().startswith("Removing"):
                        log.Warn("Could not delete file in command='%s'" %
                                 (commandline, ))
                        break
                elif match == 8:
                    log.Warn("Could not delete file in command='%s'" %
                             (commandline, ))
                    break
                elif match == 9:
                    log.Warn("Could not open file in command='%s'" %
                             (commandline, ))
                    break
            child.close(force=True)
            if child.exitstatus == 0:
                return res
            log.Warn("Running '%s' failed (attempt #%d)" % (commandline, n))
        log.Warn("Giving up trying to execute '%s' after %d attempts" %
                 (commandline, globals.num_retries))
        raise BackendException("Error running '%s'" % commandline)
Code Example #8
File: selection.py Project: hurlebouc/GDuplicity
    def Iterate(self, path):
        """Return iterator yielding paths in path

        This function looks a bit more complicated than it needs to be
        because it avoids extra recursion (and no extra function calls
        for non-directory files) while still doing the "directory
        scanning" bit.

        """
        def error_handler(exc, path, filename):
            fullpath = os.path.join(path.name, filename)
            try:
                mode = os.stat(fullpath)[stat.ST_MODE]
                if stat.S_ISSOCK(mode):
                    log.Info(
                        _("Skipping socket %s") % fullpath,
                        log.InfoCode.skipping_socket, util.escape(fullpath))
                else:
                    log.Warn(
                        _("Error initializing file %s") % fullpath,
                        log.WarningCode.cannot_iterate, util.escape(fullpath))
            except OSError:
                log.Warn(
                    _("Error accessing possibly locked file %s") % fullpath,
                    log.WarningCode.cannot_stat, util.escape(fullpath))
            return None

        def diryield(path):
            """Generate relevant files in directory path

            Returns (path, num) where num == 0 means path should be
            generated normally, num == 1 means the path is a directory
            and should be included iff something inside is included.

            """
            # todo: get around circular dependency issue by importing here
            from duplicity import robust  #@Reimport
            for filename in robust.listpath(path):
                new_path = robust.check_common_error(error_handler,
                                                     Path.append,
                                                     (path, filename))
                # make sure file is read accessible
                if (new_path and new_path.type in ["reg", "dir"]
                        and not os.access(new_path.name, os.R_OK)):
                    log.Warn(
                        _("Error accessing possibly locked file %s") %
                        new_path.name, log.WarningCode.cannot_read,
                        util.escape(new_path.name))
                    if diffdir.stats:
                        diffdir.stats.Errors += 1
                    new_path = None
                elif new_path:
                    s = self.Select(new_path)
                    if s == 1:
                        yield (new_path, 0)
                    elif s == 2 and new_path.isdir():
                        yield (new_path, 1)

        if not path.type:
            # base doesn't exist
            log.Warn(
                _("Warning: base %s doesn't exist, continuing") % path.name)
            return
        log.Debug(_("Selecting %s") % path.name)
        yield path
        if not path.isdir():
            return
        diryield_stack = [diryield(path)]
        delayed_path_stack = []

        while diryield_stack:
            try:
                subpath, val = diryield_stack[-1].next()
            except StopIteration:
                diryield_stack.pop()
                if delayed_path_stack:
                    delayed_path_stack.pop()
                continue
            if val == 0:
                if delayed_path_stack:
                    for delayed_path in delayed_path_stack:
                        log.Log(_("Selecting %s") % delayed_path.name, 6)
                        yield delayed_path
                    del delayed_path_stack[:]
                log.Debug(_("Selecting %s") % subpath.name)
                yield subpath
                if subpath.isdir():
                    diryield_stack.append(diryield(subpath))
            elif val == 1:
                delayed_path_stack.append(subpath)
                diryield_stack.append(diryield(subpath))
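
A toy, single-level illustration of the delayed-directory idea in Iterate() (the real code keeps a stack so the delay nests through subdirectories): a directory is yielded only once a selected file inside it forces a flush.

import os

def iterate_selected(root, selected):
    """Yield selected files, emitting each directory only when needed."""
    for dirpath, dirnames, filenames in os.walk(root):
        pending = dirpath  # delayed until something inside is selected
        for name in filenames:
            full = os.path.join(dirpath, name)
            if selected(full):
                if pending is not None:
                    yield pending  # flush the delayed directory first
                    pending = None
                yield full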
Code Example #9
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        # we expect an error return, so go low-level and ignore it
        fout = None
        ret = None
        try:
            p = os.popen(u"ncftpls -v")
            fout = p.read()
            ret = p.close()
        except Exception:
            pass
        # the expected error is 8 in the high-byte and some output
        if ret != 0x0800 or not fout:
            log.FatalError(u"NcFTP not found:  Please install NcFTP version 3.1.9 or later",
                           log.ErrorCode.ftp_ncftp_missing)

        # version is the second word of the first line
        version = fout.split(u'\n')[0].split()[1]
        if version < u"3.1.9":
            log.FatalError(u"NcFTP too old:  Duplicity requires NcFTP version 3.1.9,"
                           u"3.2.1 or later.  Version 3.2.0 will not work properly.",
                           log.ErrorCode.ftp_ncftp_too_old)
        elif version == u"3.2.0":
            log.Warn(u"NcFTP (ncftpput) version 3.2.0 may fail with duplicity.\n"
                     u"see: http://www.ncftpd.com/ncftp/doc/changelog.html\n"
                     u"If you have trouble, please upgrade to 3.2.1 or later",
                     log.WarningCode.ftp_ncftp_v320)
        log.Notice(u"NcFTP version is %s" % version)

        self.parsed_url = parsed_url

        self.url_string = duplicity.backend.strip_auth_from_url(self.parsed_url)

        # strip ncftp+ prefix
        self.url_string = duplicity.backend.strip_prefix(self.url_string, u'ncftp')

        # This squelches the "file not found" result from ncftpls when
        # the ftp backend looks for a collection that does not exist.
        # version 3.2.2 has error code 5, 1280 is some legacy value
        self.popen_breaks[u'ncftpls'] = [5, 1280]

        # Use an explicit directory name.
        if self.url_string[-1] != u'/':
            self.url_string += u'/'

        self.password = self.get_password()

        if config.ftp_connection == u'regular':
            self.conn_opt = u'-E'
        else:
            self.conn_opt = u'-F'

        self.tempfd, self.tempname = tempdir.default().mkstemp()
        self.tempfile = os.fdopen(self.tempfd, u"w")
        self.tempfile.write(u"host %s\n" % self.parsed_url.hostname)
        self.tempfile.write(u"user %s\n" % self.parsed_url.username)
        self.tempfile.write(u"pass %s\n" % self.password)
        self.tempfile.close()
        self.flags = u"-f %s %s -t %s -o useCLNT=0,useHELP_SITE=0 " % \
            (self.tempname, self.conn_opt, config.timeout)
        if parsed_url.port is not None and parsed_url.port != 21:
            self.flags += u" -P '%s'" % (parsed_url.port)
Code Example #10
        def inner_retry(self, *args):
            global _last_exception
            errors_fatal, errors_default = config.are_errors_fatal.get(
                operation, (True, None))
            for n in range(1, config.num_retries + 1):
                try:
                    return fn(self, *args)
                except FatalBackendException as e:
                    _last_exception = e
                    if not errors_fatal:
                        # backend wants to report and ignore errors
                        return errors_default
                    else:
                        # die on fatal errors
                        raise e
                except Exception as e:
                    _last_exception = e
                    if not errors_fatal:
                        # backend wants to report and ignore errors
                        return errors_default
                    else:
                        # retry on anything else
                        log.Debug(
                            _(u"Backtrace of previous error: %s") %
                            exception_traceback())
                        at_end = n == config.num_retries
                        code = _get_code_from_exception(
                            self.backend, operation, e)
                        if code == log.ErrorCode.backend_not_found:
                            # If we tried to do something, but the file just isn't there,
                            # no need to retry.
                            at_end = True
                        if at_end and fatal:

                            def make_filename(f):
                                if isinstance(f, path.ROPath):
                                    return util.escape(f.uc_name)
                                else:
                                    return util.escape(f)

                            extra = u' '.join([operation] + [
                                make_filename(x)
                                for x in args if (x and isinstance(x, str))
                            ])
                            log.FatalError(
                                _(u"Giving up after %s attempts. %s: %s") %
                                (n, e.__class__.__name__, util.uexc(e)),
                                code=code,
                                extra=extra)
                        else:
                            log.Warn(
                                _(u"Attempt %s failed. %s: %s") %
                                (n, e.__class__.__name__, util.uexc(e)))
                        if not at_end:
                            if isinstance(e, TemporaryLoadException):
                                # wait longer before trying again
                                time.sleep(3 * config.backend_retry_delay)
                            else:
                                # wait a bit before trying again
                                time.sleep(config.backend_retry_delay)
                            if hasattr(self.backend, u'_retry_cleanup'):
                                self.backend._retry_cleanup()
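inner_retry above is the inner function of a decorator factory: fn, operation, and fatal arrive through the enclosing closure. A generic, self-contained sketch of that shape (the name retry and the defaults here are illustrative assumptions, not duplicity's exact signature):

import functools
import time

def retry(operation, fatal=True, num_retries=3, delay=1):
    # Decorator factory: operation/fatal are captured by the closure,
    # just as in the inner_retry example above.
    def decorate(fn):
        @functools.wraps(fn)
        def inner_retry(self, *args):
            for n in range(1, num_retries + 1):
                try:
                    return fn(self, *args)
                except Exception:
                    if n == num_retries:
                        if fatal:
                            raise       # give up after the last attempt
                        return None     # report-and-ignore, like errors_default
                    time.sleep(delay)   # wait a bit before trying again
        return inner_retry
    return decorate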
Code example #11
File: _boto_multi.py Project: hurlebouc/GDuplicity
class BotoBackend(duplicity.backend.Backend):
    """
    Backend for Amazon's Simple Storage System, (aka Amazon S3), though
    the use of the boto module, (http://code.google.com/p/boto/).

    To make use of this backend you must set aws_access_key_id
    and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
    with your Amazon Web Services key id and secret respectively.
    Alternatively you can export the environment variables
    AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
    """
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        from boto.s3.key import Key
        from boto.s3.multipart import MultiPartUpload

        # This folds the null prefix and all null parts, which means that:
        #  //MyBucket/ and //MyBucket are equivalent.
        #  //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
        # (A list comprehension rather than filter(), so the .pop(0) below
        # also works on Python 3, where filter() returns an iterator.)
        self.url_parts = [x for x in parsed_url.path.split('/') if x != '']

        if self.url_parts:
            self.bucket_name = self.url_parts.pop(0)
        else:
            # Duplicity hangs if boto gets a null bucket name.
            # HC: Caught a socket error, trying to recover
            raise BackendException('Boto requires a bucket name.')

        self.scheme = parsed_url.scheme

        self.key_class = Key

        if self.url_parts:
            self.key_prefix = '%s/' % '/'.join(self.url_parts)
        else:
            self.key_prefix = ''

        self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
        self.parsed_url = parsed_url
        self.resetConnection()

    def resetConnection(self):
        self.bucket = None
        self.conn = get_connection(self.scheme, self.parsed_url)
        self.bucket = self.conn.lookup(self.bucket_name)

    def put(self, source_path, remote_filename=None):
        from boto.s3.connection import Location
        if globals.s3_european_buckets:
            if not globals.s3_use_new_style:
                log.FatalError(
                    "European bucket creation was requested, but not new-style "
                    "bucket addressing (--s3-use-new-style)",
                    log.ErrorCode.s3_bucket_not_style)
        # Network glitch may prevent the first few attempts at creating/looking up a bucket
        for n in range(1, globals.num_retries + 1):
            if self.bucket:
                break
            if n > 1:
                time.sleep(30)
            try:
                try:
                    self.bucket = self.conn.get_bucket(self.bucket_name,
                                                       validate=True)
                except Exception as e:
                    if "NoSuchBucket" in str(e):
                        if globals.s3_european_buckets:
                            self.bucket = self.conn.create_bucket(
                                self.bucket_name, location=Location.EU)
                        else:
                            self.bucket = self.conn.create_bucket(
                                self.bucket_name)
                    else:
                        raise e
            except Exception as e:
                log.Warn(
                    "Bucket creation/lookup attempt #%d for '%s' failed (reason: %s: %s)"
                    % (n, self.bucket_name, e.__class__.__name__, str(e)))
                self.resetConnection()

        if not remote_filename:
            remote_filename = source_path.get_filename()
        key = self.key_prefix + remote_filename
        for n in range(1, globals.num_retries + 1):
            if n > 1:
                # sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
                time.sleep(10)

            if globals.s3_use_rrs:
                storage_class = 'REDUCED_REDUNDANCY'
            else:
                storage_class = 'STANDARD'
            log.Info("Uploading %s/%s to %s Storage" %
                     (self.straight_url, remote_filename, storage_class))
            try:
                headers = {
                    'Content-Type': 'application/octet-stream',
                    'x-amz-storage-class': storage_class
                }
                self.upload(source_path.name, key, headers)
                self.resetConnection()
                return
            except Exception as e:
                log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url, remote_filename, n,
                               e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s" %
                          (exception_traceback(), ))
                self.resetConnection()
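The prefix-folding in __init__ is easy to exercise in isolation. A standalone sketch of the same parsing rule (the function name is mine):

def split_bucket_and_prefix(path):
    # Fold the null prefix and all null parts, as described above.
    parts = [p for p in path.split('/') if p]
    if not parts:
        raise ValueError('Boto requires a bucket name.')
    prefix = '%s/' % '/'.join(parts[1:]) if parts[1:] else ''
    return parts[0], prefix

assert split_bucket_and_prefix('//MyBucket//My///My/Prefix/') == ('MyBucket', 'My/My/Prefix/')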
Code example #12
 def _delete(self, filename):
     file_id = self.id_by_name(filename)
     if file_id != '':
         self.drive.auth.service.files().delete(fileId=file_id).execute()
     else:
         log.Warn("File '%s' does not exist while trying to delete it" % (filename,))
Code example #13
File: robust.py Project: aminba90/PythonCourse
 def error_handler(exc):
     log.Warn(_(u"Error listing directory %s") % path.uc_name)
     return []
Code example #14
                        raise BackendException("scp remote error: %s" % chan.recv(-1))
                    chan.sendall(f.read()+'\0')
                    f.close()
                    response=chan.recv(1)
                    if (response!="\0"):
                        raise BackendException("scp remote error: %s" % chan.recv(-1))
                    chan.close()
                    return
                else:
                    try:
                        self.sftp.put(source_path.name,remote_filename)
                        return
                    except Exception, e:
                        raise BackendException("sftp put of %s (as %s) failed: %s" % (source_path.name,remote_filename,e))
            except Exception, e:
                log.Warn("%s (Try %d of %d) Will retry in %d seconds." % (e,n,globals.num_retries,self.retry_delay))
        raise BackendException("Giving up trying to upload '%s' after %d attempts" % (remote_filename,n))


    def get(self, remote_filename, local_path):
        """Retrieve a single file from the remote side.

        In scp mode, unavoidable quoting issues will make this fail if the
        remote directory or file names contain single quotes."""
        for n in range(1, globals.num_retries + 1):
            if n > 1:
                # sleep before retry
                time.sleep(self.retry_delay)
            try:
                if globals.use_scp:
                    try:
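The one-byte reads in the scp branch follow the scp wire protocol: after each block of metadata or file data the receiver answers with a status byte, 0 for OK and non-zero for a warning or fatal error followed by a message. A small helper capturing that convention (the helper name is mine; the status-byte rule is standard scp):

def check_scp_ack(chan):
    # Read the single status byte; anything but NUL means the remote
    # side reported a problem, with the message following on the channel.
    response = chan.recv(1)
    if response != "\0":
        raise BackendException("scp remote error: %s" % chan.recv(-1))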
Code example #15
File: sshbackend.py Project: jiaopengju/duplicity
 def run_scp_command(self, commandline):
     """ Run an scp command, responding to password prompts """
     for n in range(1, globals.num_retries + 1):
         if n > 1:
             # sleep before retry
             time.sleep(30)
         log.Info("Running '%s' (attempt #%d)" % (commandline, n))
         child = pexpect.spawn(commandline, timeout=None)
         if globals.ssh_askpass:
             state = "authorizing"
         else:
             state = "copying"
         while 1:
             if state == "authorizing":
                 match = child.expect([
                     pexpect.EOF, "(?i)timeout, server not responding",
                     "(?i)pass(word|phrase .*):", "(?i)permission denied",
                     "authenticity"
                 ])
                 log.Debug("State = %s, Before = '%s'" %
                           (state, child.before.strip()))
                 if match == 0:
                     log.Warn("Failed to authenticate")
                     break
                 elif match == 1:
                     log.Warn("Timeout waiting to authenticate")
                     break
                 elif match == 2:
                     child.sendline(self.password)
                     state = "copying"
                 elif match == 3:
                     log.Warn("Invalid SSH password")
                     break
                 elif match == 4:
                     log.Warn(
                         "Remote host authentication failed (missing known_hosts entry?)"
                     )
                     break
             elif state == "copying":
                 match = child.expect([
                     pexpect.EOF, "(?i)timeout, server not responding",
                     "stalled", "authenticity", "ETA"
                 ])
                 log.Debug("State = %s, Before = '%s'" %
                           (state, child.before.strip()))
                 if match == 0:
                     break
                 elif match == 1:
                     log.Warn("Timeout waiting for response")
                     break
                 elif match == 2:
                     state = "stalled"
                 elif match == 3:
                     log.Warn(
                         "Remote host authentication failed (missing known_hosts entry?)"
                     )
                     break
             elif state == "stalled":
                 match = child.expect([
                     pexpect.EOF, "(?i)timeout, server not responding",
                     "ETA"
                 ])
                 log.Debug("State = %s, Before = '%s'" %
                           (state, child.before.strip()))
                 if match == 0:
                     break
                 elif match == 1:
                     log.Warn("Stalled for too long, aborted copy")
                     break
                 elif match == 2:
                     state = "copying"
         child.close(force=True)
         if child.exitstatus == 0:
             return
         log.Warn("Running '%s' failed (attempt #%d)" % (commandline, n))
     log.Warn("Giving up trying to execute '%s' after %d attempts" %
              (commandline, globals.num_retries))
     raise BackendException("Error running '%s'" % commandline)
Code example #16
File: selection.py Project: hurlebouc/GDuplicity
        something_excluded, tuple_list = None, []
        separator = "\0" if globals.null_separator else "\n"
        for line in filelist_fp.read().split(separator):
            if not line:
                continue  # skip blanks
            try:
                pair = self.filelist_parse_line(line, include)
            except FilePrefixError as exc:
                incr_warnings(exc)
                continue
            tuple_list.append(pair)
            if not pair[1]:
                something_excluded = 1
        if filelist_fp not in (sys.stdin, ) and filelist_fp.close():
            log.Warn(_("Error closing filelist %s") % filelist_name)
        return (tuple_list, something_excluded)

    def filelist_parse_line(self, line, include):
        """Parse a single line of a filelist, returning a pair

        pair will be of form (index, include), where index is another
        tuple, and include is 1 if the line specifies that we are
        including a file.  The default is given as an argument.
        prefix is the string that the index is relative to.

        """
        line = line.strip()
        if line[:2] == "+ ":
            # Check for "+ "/"- " syntax
            include = 1
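The "+ "/"- " handling is cut off above; written as a small pure function, the full convention the docstring describes (an explicit prefix overrides the include default passed in) looks roughly like:

def parse_prefix(line, default_include):
    line = line.strip()
    if line.startswith("+ "):
        return line[2:], 1   # explicit include
    if line.startswith("- "):
        return line[2:], 0   # explicit exclude
    return line, default_include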
Code example #17
 def error_handler(exc):  # pylint: disable=unused-argument
     log.Warn(_(u"Error listing directory %s") % path.uc_name)
     return []
Code example #18
            log.Info("Uploading '%s/%s' " % (self.container, remote_filename))
            try:
                sobject = self.container.create_object(remote_filename)
                sobject.load_from_filename(source_path.name)
                return
            except self.resp_exc as error:
                log.Warn(
                    "Upload of '%s' failed (attempt %d): CloudFiles returned: %s %s"
                    % (remote_filename, n, error.status, error.reason))
            except Exception as e:
                log.Warn("Upload of '%s' failed (attempt %s): %s: %s" %
                         (remote_filename, n, e.__class__.__name__, str(e)))
                log.Debug("Backtrace of previous error: %s" %
                          exception_traceback())
            time.sleep(30)
        log.Warn("Giving up uploading '%s' after %s attempts" %
                 (remote_filename, globals.num_retries))
        raise BackendException("Error uploading '%s'" % remote_filename)

    def get(self, remote_filename, local_path):
        for n in range(1, globals.num_retries + 1):
            log.Info("Downloading '%s/%s'" % (self.container, remote_filename))
            try:
                sobject = self.container.create_object(remote_filename)
                f = open(local_path.name, 'w')
                for chunk in sobject.stream():
                    f.write(chunk)
                local_path.setdata()
                return
            except self.resp_exc as resperr:
                log.Warn(
                    "Download of '%s' failed (attempt %s): CloudFiles returned: %s %s"
Code example #19
            try:
                key.set_contents_from_filename(source_path.name, {'Content-Type': 'application/octet-stream',
                                                                  'x-amz-storage-class': storage_class})
                key.close()
                self.resetConnection()
                return
            except Exception as e:
                log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
                         "" % (self.straight_url,
                               remote_filename,
                               n,
                               e.__class__.__name__,
                               str(e)))
                log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
                self.resetConnection()
        log.Warn("Giving up trying to upload %s/%s after %d attempts" %
                 (self.straight_url, remote_filename, globals.num_retries))
        raise BackendException("Error uploading %s/%s" % (self.straight_url, remote_filename))

    def get(self, remote_filename, local_path):
        key = self.key_class(self.bucket)
        key.key = self.key_prefix + remote_filename
        for n in range(1, globals.num_retries + 1):
            if n > 1:
                # sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
                time.sleep(10)
            log.Info("Downloading %s/%s" % (self.straight_url, remote_filename))
            try:
                key.get_contents_to_filename(local_path.name)
                local_path.setdata()
                self.resetConnection()
                return
Code example #20
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
or see http://www.gnu.org/copyleft/lesser.html
"""

from builtins import object
import os
import sys
import fcntl

from duplicity import log

try:
    import threading
except ImportError:
    import dummy_threading  # @UnusedImport
    log.Warn(_(u"Threading not available -- zombie processes may appear"))

__author__ = u"Frank J. Tobin, [email protected]"
__version__ = u"0.3.2"
__revision__ = u"$Id: GnuPGInterface.py,v 1.6 2009/06/06 17:35:19 loafman Exp $"

# "standard" filehandles attached to processes
_stds = [u'stdin', u'stdout', u'stderr']

# the permissions each type of fh needs to be opened with
_fd_modes = {
    u'stdin': u'wb',
    u'stdout': u'rb',
    u'stderr': u'r',
    u'passphrase': u'w',
    u'command': u'w',
Code example #21
 def check_renamed_files(self, file_list):
     if not self.user_authenticated():
         self.login()
     bad_list = [
         x for x in file_list
         if DPBX_AUTORENAMED_FILE_RE.search(x) is not None
     ]
     if len(bad_list) == 0:
         return
     log.Warn(u'-' * 72)
     log.Warn(
         u'Warning! It looks like there are automatically renamed files on the backend.'
     )
     log.Warn(
         u'They were probably created by an older version of duplicity.'
     )
     log.Warn(u'')
     log.Warn(
         u'Please check your backup consistency. Most likely you will need to choose the'
     )
     log.Warn(
         u'largest of the duplicity-* (number).gpg files and remove the brackets from its name.'
     )
     log.Warn(u'')
     log.Warn(
         u'These files are not managed by duplicity at all and will not be')
     log.Warn(u'removed/rotated automatically.')
     log.Warn(u'')
     log.Warn(u'Affected files:')
     for x in bad_list:
         log.Warn(u'\t%s' % x)
     log.Warn(u'')
     log.Warn(u'In any case it\'s better to create a full backup.')
     log.Warn(u'-' * 72)
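For illustration, a hypothetical stand-in for DPBX_AUTORENAMED_FILE_RE, which is defined elsewhere in the backend; the pattern below is an assumption, not duplicity's actual regex. Dropbox resolves name conflicts by appending a parenthesized counter, which is what the check above scans for:

import re

# Assumption: matches the " (1)" style suffix Dropbox inserts on name conflicts.
AUTORENAMED_RE = re.compile(r' \(\d+\)\.')

files = [u'duplicity-full.vol1.gpg', u'duplicity-full.vol1 (1).gpg']
bad_list = [x for x in files if AUTORENAMED_RE.search(x) is not None]
assert bad_list == [u'duplicity-full.vol1 (1).gpg']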