Example #1
    def cleanup(self):
        """
        Cleanup any files created in the temporary directory (that
        have not been forgotten), and clean up the temporary directory
        itself.

        Failures are logged, but this method will not raise an
        exception.
        """
        self.__lock.acquire()
        try:
            if self.__dir is not None:
                for file in self.__pending.keys():
                    try:
                        log.Debug(_("Removing still remembered temporary file %s") % util.fsdecode(file))
                        util.ignore_missing(os.unlink, file)
                    except Exception:
                        log.Info(_("Cleanup of temporary file %s failed") % util.fsdecode(file))
                        pass
                try:
                    os.rmdir(self.__dir)
                except Exception:
                    log.Warn(_("Cleanup of temporary directory %s failed - "
                               "this is probably a bug.") % util.fsdecode(self.__dir))
                    pass
                self.__pending = None
                self.__dir = None
        finally:
            self.__lock.release()
Example #2
def patch_diff_tarfile(base_path, diff_tarfile, restrict_index=()):
    """Patch given Path object using delta tarfile (as in tarfile.TarFile)

    If restrict_index is set, ignore any deltas in diff_tarfile that
    don't start with restrict_index.

    """
    if base_path.exists():
        path_iter = selection.Select(base_path).set_iter()
    else:
        path_iter = empty_iter()  # probably untarring full backup

    diff_path_iter = difftar2path_iter(diff_tarfile)
    if restrict_index:
        diff_path_iter = filter_path_iter(diff_path_iter, restrict_index)
    collated = diffdir.collate2iters(path_iter, diff_path_iter)

    ITR = IterTreeReducer(PathPatcher, [base_path])
    for basis_path, diff_ropath in collated:
        if basis_path:
            log.Info(_("Patching %s") % (util.fsdecode(basis_path.get_relative_path())),
                     log.InfoCode.patch_file_patching,
                     util.escape(basis_path.get_relative_path()))
            ITR(basis_path.index, basis_path, diff_ropath)
        else:
            log.Info(_("Patching %s") % (util.fsdecode(diff_ropath.get_relative_path())),
                     log.InfoCode.patch_file_patching,
                     util.escape(diff_ropath.get_relative_path()))
            ITR(diff_ropath.index, basis_path, diff_ropath)
    ITR.Finish()
    base_path.setdata()
Example #3
 def add_filelist(o, s, filename, p):
     select_opts.append((util.fsdecode(s), util.fsdecode(filename)))
     try:
         select_files.append(io.open(filename, "rt", encoding="UTF-8"))
     except IOError:
         log.FatalError(_("Error opening file %s") % filename,
                        log.ErrorCode.cant_open_filelist)
Example #4
def patch_diff_tarfile(base_path, diff_tarfile, restrict_index=()):
    u"""Patch given Path object using delta tarfile (as in tarfile.TarFile)

    If restrict_index is set, ignore any deltas in diff_tarfile that
    don't start with restrict_index.

    """
    if base_path.exists():
        path_iter = selection.Select(base_path).set_iter()
    else:
        path_iter = empty_iter()  # probably untarring full backup

    diff_path_iter = difftar2path_iter(diff_tarfile)
    if restrict_index:
        diff_path_iter = filter_path_iter(diff_path_iter, restrict_index)
    collated = diffdir.collate2iters(path_iter, diff_path_iter)

    ITR = IterTreeReducer(PathPatcher, [base_path])
    for basis_path, diff_ropath in collated:
        if basis_path:
            log.Info(
                _(u"Patching %s") %
                (util.fsdecode(basis_path.get_relative_path())),
                log.InfoCode.patch_file_patching,
                util.escape(basis_path.get_relative_path()))
            ITR(basis_path.index, basis_path, diff_ropath)
        else:
            log.Info(
                _(u"Patching %s") %
                (util.fsdecode(diff_ropath.get_relative_path())),
                log.InfoCode.patch_file_patching,
                util.escape(diff_ropath.get_relative_path()))
            ITR(diff_ropath.index, basis_path, diff_ropath)
    ITR.Finish()
    base_path.setdata()
Example #5
    def mktemp(self):
        u"""
        Return a unique filename suitable for use as a temporary
        file. The file is not created.

        Subsequent calls to this method are guaranteed to never return
        the same filename again. As a result, it is safe to use under
        concurrent conditions.

        NOTE: mkstemp() is greatly preferred.
        """
        filename = None

        self.__lock.acquire()
        try:
            self.__tempcount = self.__tempcount + 1
            suffix = u"-%d" % (self.__tempcount, )
            filename = tempfile.mktemp(suffix, u"mktemp-",
                                       util.fsdecode(self.__dir))

            log.Debug(
                _(u"Registering (mktemp) temporary file %s") %
                util.fsdecode(filename))
            self.__pending[filename] = None
        finally:
            self.__lock.release()

        return filename
Example #6
    def test_basic(self):
        u"""Check get/parse cycle"""
        dup_time.setprevtime(10)
        dup_time.setcurtime(20)

        file_naming.prepare_regex(force=True)
        filename = file_naming.get(u"inc", volume_number=23)
        log.Info(u"Inc filename: " + util.fsdecode(filename))
        pr = file_naming.parse(filename)
        assert pr and pr.type == u"inc", pr
        assert pr.start_time == 10
        assert pr.end_time == 20
        assert pr.volume_number == 23
        assert not pr.partial

        filename = file_naming.get(u"full-sig")
        log.Info(u"Full sig filename: " + util.fsdecode(filename))
        pr = file_naming.parse(filename)
        assert pr.type == u"full-sig"
        assert pr.time == 20
        assert not pr.partial

        filename = file_naming.get(u"new-sig")
        pr = file_naming.parse(filename)
        assert pr.type == u"new-sig"
        assert pr.start_time == 10
        assert pr.end_time == 20
        assert not pr.partial
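Example #7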
 def delete(self):
     u"""
     Remove all files in set, both local and remote
     """
     rfn = self.get_filenames()
     rfn.reverse()
     try:
         self.backend.delete(rfn)
     except Exception:
         log.Debug(
             _(u"BackupSet.delete: missing %s") %
             [util.fsdecode(f) for f in rfn])
         pass
     if self.action != u"replicate":
         local_filename_list = config.archive_dir_path.listdir()
     else:
         local_filename_list = []
     for lfn in local_filename_list:
         pr = file_naming.parse(lfn)
         if (pr and pr.time == self.time
                 and pr.start_time == self.start_time
                 and pr.end_time == self.end_time):
             try:
                 config.archive_dir_path.append(lfn).delete()
             except Exception:
                 log.Debug(
                     _(u"BackupSet.delete: missing %s") %
                     util.fsdecode(lfn))
                 pass
     util.release_lockfile()
Example #8
 def _put(self, source_path, remote_filename):
     remote_filename = util.fsdecode(remote_filename)
     drive_file = self.file_by_name(remote_filename)
     if drive_file is None:
         # No existing file, make a new one
         create_file_args = {
             u'title': remote_filename,
             u'parents': [{
                 u"kind": u"drive#fileLink",
                 u"id": self.folder
             }]
         }
         create_file_args[u'parents'][0].update(self.api_params)
         drive_file = self.drive.CreateFile(create_file_args)
         log.Info(u"PyDrive backend: creating new file '%s'" %
                  (remote_filename, ))
     else:
         log.Info(
             u"PyDrive backend: replacing existing file '%s' with id '%s'" %
             (remote_filename, drive_file[u'id']))
     drive_file.SetContentFile(util.fsdecode(source_path.name))
     if self.shared_drive_id:
         drive_file.Upload(param={u'supportsTeamDrives': True})
     else:
         drive_file.Upload()
     self.id_cache[remote_filename] = drive_file[u'id']
Example #9
    def set_from_stat(self):
        u"""Set the value of self.type, self.mode from self.stat"""
        if not self.stat:
            self.type = None
            return

        st_mode = self.stat.st_mode
        if stat.S_ISREG(st_mode):
            self.type = u"reg"
        elif stat.S_ISDIR(st_mode):
            self.type = u"dir"
        elif stat.S_ISLNK(st_mode):
            self.type = u"sym"
        elif stat.S_ISFIFO(st_mode):
            self.type = u"fifo"
        elif stat.S_ISSOCK(st_mode):
            raise PathException(util.fsdecode(self.get_relative_path()) +
                                u" is a socket, unsupported by tar")
        elif stat.S_ISCHR(st_mode):
            self.type = u"chr"
        elif stat.S_ISBLK(st_mode):
            self.type = u"blk"
        else:
            raise PathException(u"Unknown type")

        self.mode = stat.S_IMODE(st_mode)
        if self.type in (u"chr", u"blk"):
            try:
                self.devnums = (os.major(self.stat.st_rdev),
                                os.minor(self.stat.st_rdev))
            except:
                log.Warn(_(u"Warning: %s invalid devnums (0x%X), treating as (0, 0).")
                         % (util.fsdecode(self.get_relative_path()), self.stat.st_rdev))
                self.devnums = (0, 0)
Example #10
    def unseal_status(self, u_remote_filenames):
        u"""
        Shows unsealing status for input volumes
        """
        one_object_not_unsealed = False
        objs = self.__list_objs(ffilter=lambda x: util.fsdecode(x[u'name']) in u_remote_filenames)
        max_duration = 0
        for o in objs:
            policy_retrieval_state = o[u'policy_retrieval_state']
            filename = util.fsdecode(o[u'name'])
            if policy_retrieval_state == u'sealed':
                log.Notice(u"Error: volume is still in sealed state : %s." % (filename))
                log.Notice(u"Launching unseal of volume %s." % (filename))
                self.unseal(o[u'name'])
                one_object_not_unsealed = True
            elif policy_retrieval_state == u"unsealing":
                duration = int(o[u'policy_retrieval_delay'])
                log.Info(u"%s available in %d seconds." % (filename, duration))
                if duration > max_duration:
                    max_duration = duration
                one_object_not_unsealed = True

        m, s = divmod(max_duration, 60)
        h, m = divmod(m, 60)
        max_duration_eta = u"%dh%02dm%02ds" % (h, m, s)
        log.Notice(u"Need to wait %s before all volumes are unsealed." % (max_duration_eta))
        return one_object_not_unsealed
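Example #11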
    def forget(self, fname):
        u"""
        Forget about the given filename previously obtained through
        mktemp() or mkstemp(). This should be called *after* the file
        has been deleted, to stop a future cleanup() from trying to
        delete it.

        Forgetting is only needed for scaling purposes; that is, to
        avoid n tempfile creations from implying that n filenames are
        kept in memory. Typically this would never matter in
        duplicity, but for niceness' sake callers are recommended to
        use this method whenever possible.
        """
        self.__lock.acquire()
        try:
            if fname in self.__pending:
                log.Debug(
                    _(u"Forgetting temporary file %s") % util.fsdecode(fname))
                del (self.__pending[fname])
            else:
                log.Warn(
                    _(u"Attempt to forget unknown tempfile %s - this is probably a bug."
                      ) % util.fsdecode(fname))
                pass
        finally:
            self.__lock.release()
Example #12
    def set_from_stat(self):
        """Set the value of self.type, self.mode from self.stat"""
        if not self.stat:
            self.type = None
            return

        st_mode = self.stat.st_mode
        if stat.S_ISREG(st_mode):
            self.type = "reg"
        elif stat.S_ISDIR(st_mode):
            self.type = "dir"
        elif stat.S_ISLNK(st_mode):
            self.type = "sym"
        elif stat.S_ISFIFO(st_mode):
            self.type = "fifo"
        elif stat.S_ISSOCK(st_mode):
            raise PathException(util.fsdecode(self.get_relative_path()) +
                                u" is a socket, unsupported by tar")
        elif stat.S_ISCHR(st_mode):
            self.type = "chr"
        elif stat.S_ISBLK(st_mode):
            self.type = "blk"
        else:
            raise PathException("Unknown type")

        self.mode = stat.S_IMODE(st_mode)
        if self.type in ("chr", "blk"):
            try:
                self.devnums = (os.major(self.stat.st_rdev),
                                os.minor(self.stat.st_rdev))
            except:
                log.Warn(_("Warning: %s invalid devnums (0x%X), treating as (0, 0).")
                         % (util.fsdecode(self.get_relative_path()), self.stat.st_rdev))
                self.devnums = (0, 0)
Example #13
    def cleanup(self):
        u"""
        Cleanup any files created in the temporary directory (that
        have not been forgotten), and clean up the temporary directory
        itself.

        Failures are logged, but this method will not raise an
        exception.
        """
        self.__lock.acquire()
        try:
            if self.__dir is not None:
                for file in list(self.__pending.keys()):
                    try:
                        log.Debug(_(u"Removing still remembered temporary file %s") % util.fsdecode(file))
                        util.ignore_missing(os.unlink, file)
                    except Exception:
                        log.Info(_(u"Cleanup of temporary file %s failed") % util.fsdecode(file))
                        pass
                try:
                    os.rmdir(self.__dir)
                except Exception:
                    log.Warn(_(u"Cleanup of temporary directory %s failed - "
                               u"this is probably a bug.") % util.fsdecode(self.__dir))
                    pass
                self.__pending = None
                self.__dir = None
        finally:
            self.__lock.release()
Example #14
 def delete(self):
     """
     Remove all files in set, both local and remote
     """
     rfn = self.get_filenames()
     rfn.reverse()
     try:
         self.backend.delete(rfn)
     except Exception:
         log.Debug(_("BackupSet.delete: missing %s") % [util.fsdecode(f) for f in rfn])
         pass
     if self.action not in ["collection-status", "replicate"]:
         local_filename_list = globals.archive_dir_path.listdir()
     else:
         local_filename_list = []
     for lfn in local_filename_list:
         pr = file_naming.parse(lfn)
         if (pr and pr.time == self.time and
                 pr.start_time == self.start_time and
                 pr.end_time == self.end_time):
             try:
                 globals.archive_dir_path.append(lfn).delete()
             except Exception:
                 log.Debug(_("BackupSet.delete: missing %s") % [util.fsdecode(f) for f in lfn])
                 pass
     util.release_lockfile()
Example #16
 def _put(self, source_path, remote_filename):
     lp = util.fsdecode(source_path.name)
     if config.mp_segment_size > 0:
         from swiftclient.service import SwiftUploadObject
         st = os.stat(lp)
         # only upload as a Dynamic Large Object if the mp_segment_size threshold is reached
         if st.st_size >= config.mp_segment_size:
             log.Debug(u"Uploading Dynamic Large Object")
             mp = self.svc.upload(
                 self.container, [
                     SwiftUploadObject(lp,
                                       object_name=self.prefix +
                                       util.fsdecode(remote_filename))
                 ],
                 options={u'segment_size': config.mp_segment_size})
             uploads = [a for a in mp if u'container' not in a[u'action']]
             for upload in uploads:
                 if not upload[u'success']:
                     raise BackendException(upload[u'traceback'])
             return
     rp = self.prefix + util.fsdecode(remote_filename)
     log.Debug(u"Uploading '%s' to '%s' in remote container '%s'" %
               (lp, rp, self.container))
     self.conn.put_object(container=self.container,
                          obj=self.prefix + util.fsdecode(remote_filename),
                          contents=open(lp, u'rb'))
Example #17
 def _put(self, source_path, remote_filename):
     source_pathname = util.fsdecode(source_path.name)
     remote_filename = util.fsdecode(remote_filename)
     commandline = u"%s copyto %s %s/%s" % (
         self.rclone_cmd, source_pathname, self.remote_path, remote_filename)
     rc, o, e = self._subprocess_safe_popen(commandline)
     if rc != 0:
         raise BackendException(e)
Example #18
 def _query(self, filename):
     u"""
     Get size info of filename
     """
     log.Log(u"Query: %s" % self.path + util.fsdecode(filename), log.INFO)
     file_version_info = self.file_info(quote_plus(self.path + util.fsdecode(filename), u'/'))
     return {u'size': file_version_info.size
             if file_version_info is not None and file_version_info.size is not None else -1}
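For reference, a minimal standalone sketch (with a hypothetical object name) of what quote_plus does here with u'/' kept in the safe set:

    from urllib.parse import quote_plus

    # '/' stays literal (safe); spaces become '+'; other specials are percent-encoded
    print(quote_plus(u"backups/my file.gpg", u"/"))  # backups/my+file.gpg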
Example #19
 def _get(self, remote_filename, local_path):
     u"""
     Download remote_filename to local_path
     """
     log.Log(u"Get: %s -> %s" % (self.path + util.fsdecode(remote_filename),
                                 util.fsdecode(local_path.name)),
             log.INFO)
     self.bucket.download_file_by_name(quote_plus(self.path + util.fsdecode(remote_filename), u'/'),
                                       DownloadDestLocalFile(local_path.name))
Example #20
    def _put(self, source_path, remote_filename):
        u'uploads file to Mega (deletes it first, to ensure it does not exist)'

        try:
            self.delete(util.fsdecode(remote_filename))
        except Exception:
            pass

        self.upload(local_file=util.fsdecode(source_path.get_canonical()),
                    remote_file=util.fsdecode(remote_filename))
Example #21
 def _query(self, filename):
     # use swiftservice to correctly report filesize in case of multipart uploads
     sobject = [
         a for a in self.svc.stat(self.container,
                                  [self.prefix + util.fsdecode(filename)])
     ][0]
     sobj = {u'size': int(sobject[u'headers'][u'content-length'])}
     log.Debug(u"Objectquery: '%s' has size %s." %
               (util.fsdecode(filename), sobj[u'size']))
     return sobj
Example #22
 def _get(self, remote_filename, local_path):
     remote_filename = util.fsdecode(remote_filename)
     local_pathname = util.fsdecode(local_path.name)
     commandline = u"%s copyto %s/%s %s" % (
         self.rclone_cmd, self.remote_path, remote_filename, local_pathname)
     rc, o, e = self._subprocess_safe_popen(commandline)
     if rc != 0:
         if os.path.isfile(local_pathname):
             os.remove(local_pathname)
         raise BackendException(e)
Example #23
    def get(self, remote_filename, local_path):
        u"""transfer remote_filename and the related .par2 file into
        a temp-dir. remote_filename will be renamed into local_path before
        finishing.

        If "par2 verify" detect an error transfer the Par2-volumes into the
        temp-dir and try to repair.
        """

        par2temp = local_path.get_temp_in_same_dir()
        par2temp.mkdir()
        local_path_temp = par2temp.append(remote_filename)

        self.wrapped_backend._get(remote_filename, local_path_temp)

        try:
            par2file = par2temp.append(remote_filename + b'.par2')
            self.wrapped_backend._get(par2file.get_filename(), par2file)

            par2verify = u'par2 v %s %s "%s"' % (
                self.common_options, util.fsdecode(par2file.get_canonical()),
                util.fsdecode(local_path_temp.get_canonical()))
            out, returncode = pexpect.run(par2verify, None, True)

            if returncode:
                log.Warn(u"File is corrupt. Try to repair %s" %
                         remote_filename)
                c = re.compile(u'%s\\.vol[\\d+]*\\.par2' %
                               remote_filename.decode())
                par2volumes = [
                    f for f in self.wrapped_backend._list()
                    if c.match(util.fsdecode(f))
                ]

                for filename in par2volumes:
                    file = par2temp.append(filename)
                    self.wrapped_backend._get(filename, file)

                par2repair = u'par2 r %s %s "%s"' % (
                    self.common_options, util.fsdecode(
                        par2file.get_canonical()),
                    util.fsdecode(local_path_temp.get_canonical()))
                out, returncode = pexpect.run(par2repair, None, True)

                if returncode:
                    log.Error(u"Failed to repair %s" % remote_filename)
                else:
                    log.Warn(u"Repair successful %s" % remote_filename)
        except BackendException:
            # par2 file not available
            pass
        finally:
            local_path_temp.rename(local_path)
            par2temp.deltree()
Example #24
 def _put(self, source_path, remote_filename):
     u"""
     Copy source_path to remote_filename
     """
     log.Log(u"Put: %s -> %s" % (util.fsdecode(source_path.name),
                                 self.path + util.fsdecode(remote_filename)),
             log.INFO)
     self.bucket.upload_local_file(util.fsdecode(source_path.name),
                                   quote_plus(self.path + util.fsdecode(remote_filename), u'/'),
                                   content_type=u'application/pgp-encrypted',
                                   progress_listener=B2ProgressListener())
Example #25
    def delete(self, filename):
        u"""delete given filename and its .par2 files
        """
        self.wrapped_backend._delete(filename)

        remote_list = self.unfiltered_list()

        c = re.compile(u'%s(?:\\.vol[\\d+]*)?\\.par2' %
                       util.fsdecode(filename))
        for remote_filename in remote_list:
            if c.match(util.fsdecode(remote_filename)):
                self.wrapped_backend._delete(util.fsencode(remote_filename))
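A small self-contained sketch (hypothetical filenames) of what the pattern above does and does not match:

    import re

    c = re.compile(u'%s(?:\\.vol[\\d+]*)?\\.par2' % u"archive")
    for name in [u"archive.par2", u"archive.vol007.par2", u"archive.data"]:
        print(name, bool(c.match(name)))
    # archive.par2 True
    # archive.vol007.par2 True
    # archive.data False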
Example #26
 def _put(self, source_path, remote_filename):
     if isinstance(remote_filename, b"".__class__):
         remote_filename = util.fsdecode(remote_filename)
     commandline = u"lftp -c \"source %s; mkdir -p %s; put %s -o %s\"" % (
         self.tempname, cmd_quote(
             self.remote_path), cmd_quote(source_path.uc_name),
         cmd_quote(self.remote_path) + util.fsdecode(remote_filename))
     log.Debug(u"CMD: %s" % commandline)
     s, l, e = self.subprocess_popen(commandline)
     log.Debug(u"STATUS: %s" % s)
     log.Debug(u"STDERR:\n" u"%s" % (e))
     log.Debug(u"STDOUT:\n" u"%s" % (l))
    def __init__(self, temproot=None):
        u"""
        Create a new TemporaryDirectory backed by a unique and
        securely created file system directory.

        temproot - The temp root directory, or None to use system
        default (recommended).
        """
        def defaults_to_tmp(path):
            u'''Determine if path points to a macOS system tmp'''
            sys_temps = [
                os.path.realpath(u"/tmp"),
                os.path.realpath(u"/var/tmp"),
            ]

            user_temp = os.path.realpath(path)
            for sys_temp in sys_temps:
                if user_temp.startswith(sys_temp):
                    return True
            return False

        if temproot is None:
            if config.temproot:
                temproot = config.temproot
            else:
                global _initialSystemTempRoot
                temproot = _initialSystemTempRoot
        if isinstance(temproot, b"".__class__):
            temproot = util.fsdecode(temproot)

        if (platform.system().startswith(u'Darwin')
                and defaults_to_tmp(temproot)):
            # Use temp space from getconf, never /tmp
            temproot = subprocess.check_output(
                [u'getconf', u'DARWIN_USER_TEMP_DIR'])
            temproot = util.fsdecode(temproot).rstrip()

        self.__dir = tempfile.mkdtemp(u"-tempdir", u"duplicity-", temproot)

        log.Info(_(u"Using temporary directory %s") % self.__dir)

        # number of mktemp()/mkstemp() calls served so far
        self.__tempcount = 0
        # dict of paths pending deletion; use dict even though we are
        # not concerned with association, because it is unclear whether
        # sets are O(1), while dictionaries are.
        self.__pending = {}

        self.__lock = threading.Lock(
        )  # protect private resources *AND* mktemp/mkstemp calls
Example #28
 def get_remote_manifest(self):
     """
     Return manifest by reading remote manifest on backend
     """
     assert self.remote_manifest_name
     try:
         manifest_buffer = self.backend.get_data(self.remote_manifest_name)
     except GPGError as message:
         log.Error(_("Error processing remote manifest (%s): %s") %
                   (util.fsdecode(self.remote_manifest_name), util.uexc(message)))
         return None
     log.Info(_("Processing remote manifest %s (%s)") % (
         util.fsdecode(self.remote_manifest_name), len(manifest_buffer)))
     return manifest.Manifest().from_string(manifest_buffer)
Example #29
 def get_remote_manifest(self):
     u"""
     Return manifest by reading remote manifest on backend
     """
     assert self.remote_manifest_name
     try:
         manifest_buffer = self.backend.get_data(self.remote_manifest_name)
     except GPGError as message:
         log.Error(_(u"Error processing remote manifest (%s): %s") %
                   (util.fsdecode(self.remote_manifest_name), util.uexc(message)))
         return None
     log.Info(_(u"Processing remote manifest %s (%s)") % (
         util.fsdecode(self.remote_manifest_name), len(manifest_buffer)))
     return manifest.Manifest().from_string(manifest_buffer)
Example #30
    def transfer(self, method, source_path, remote_filename):
        u"""create Par2 files and transfer the given file and the Par2 files
        with the wrapped backend.

        Par2 must run on the real filename or it would restore the
        temp-filename later on. So first of all create a tempdir and symlink
        the source_path as remote_filename inside it.
        """
        par2temp = source_path.get_temp_in_same_dir()
        par2temp.mkdir()
        source_symlink = par2temp.append(remote_filename)
        source_target = source_path.get_canonical()
        if not os.path.isabs(source_target):
            source_target = os.path.join(util.fsencode(os.getcwd()),
                                         source_target)
        os.symlink(source_target, source_symlink.get_canonical())
        source_symlink.setdata()

        log.Info(u"Create Par2 recovery files")
        par2create = u'par2 c -r%d -n%d %s "%s"' % (
            self.redundancy, self.volumes, self.common_options,
            util.fsdecode(source_symlink.get_canonical()))
        out, returncode = pexpect.run(par2create, None, True)

        if returncode:
            log.Warn(
                u"Failed to create par2 file with requested options, retrying with -n1"
            )
            par2create = u'par2 c -r%d -n1 %s "%s"' % (
                self.redundancy, self.common_options,
                util.fsdecode(source_symlink.get_canonical()))
            out, returncode = pexpect.run(par2create, None, True)
            if not returncode:
                log.Warn(u"Successfully created par2 file with -n1")

        source_symlink.delete()
        files_to_transfer = []
        if not returncode:
            for file in par2temp.listdir():
                files_to_transfer.append(par2temp.append(file))
        else:
            log.Error(u"FAILED to create par2 file with returncode %d" %
                      returncode)

        method(source_path, remote_filename)
        for file in files_to_transfer:
            method(file, file.get_filename())

        par2temp.deltree()
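For orientation, the command built above expands to something like the following (all values hypothetical: redundancy 10, 16 volumes, a quiet flag as the common options):

    redundancy, volumes, common_options = 10, 16, u"-q"
    par2create = u'par2 c -r%d -n%d %s "%s"' % (
        redundancy, volumes, common_options,
        u"/tmp/par2-work/duplicity-full.vol1.difftar.gpg")
    print(par2create)
    # par2 c -r10 -n16 -q "/tmp/par2-work/duplicity-full.vol1.difftar.gpg"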
Example #31
 def _get(self, remote_filename, local_path):
     u"""
     Download remote_filename to local_path
     """
     log.Log(
         u"Get: %s -> %s" % (self.path + util.fsdecode(remote_filename),
                             util.fsdecode(local_path.name)), log.INFO)
     if self.v_num < [1, 11, 0]:
         self.bucket.download_file_by_name(
             quote_plus(self.path + util.fsdecode(remote_filename), u'/'),
             DownloadDestLocalFile(local_path.name))
     else:
         df = self.bucket.download_file_by_name(
             quote_plus(self.path + util.fsdecode(remote_filename), u'/'))
         df.save_to(local_path.name)
Example #32
 def add_to_sets(filename):
     """
     Try adding filename to existing sets, or make new one
     """
     for set in sets:
         if set.add_filename(filename):
             log.Debug(_("File %s is part of known set") % (util.fsdecode(filename),))
             break
     else:
         log.Debug(_("File %s is not part of a known set; creating new set") % (util.fsdecode(filename),))
         new_set = BackupSet(self.backend, self.action)
         if new_set.add_filename(filename):
             sets.append(new_set)
         else:
             log.Debug(_("Ignoring file (rejected by backup set) '%s'") % util.fsdecode(filename))
    def add_filename(self, filename, pr=None):
        u"""
        Add a filename to given set.  Return true if it fits.

        The filename will match the given set if it has the right
        times and is of the right type.  The information will be set
        from the first filename given.

        @param filename: name of file to add
        @type filename: string

        @param pr: pre-computed result of file_naming.parse(filename)
        @type pr: Optional[ParseResults]
        """
        if not pr:
            pr = file_naming.parse(filename)
        if not pr or not (pr.type == u"full" or pr.type == u"inc"):
            return False

        if not self.info_set:
            self.set_info(pr)
        else:
            if pr.type != self.type:
                return False
            if pr.time != self.time:
                return False
            if (pr.start_time != self.start_time
                    or pr.end_time != self.end_time):
                return False
            if bool(pr.encrypted) != bool(self.encrypted):
                if self.partial and pr.encrypted:
                    self.encrypted = pr.encrypted

        if pr.manifest:
            self.set_manifest(filename)
        else:
            assert pr.volume_number is not None
            assert pr.volume_number not in self.volume_name_dict, \
                u"""Volume %d is already in the volume list as "%s".
                "%s" has the same volume number.
                Please check your command line and retry.""" % (
                    pr.volume_number,
                    util.fsdecode(self.volume_name_dict[pr.volume_number]),
                    util.fsdecode(filename)
                )
            self.volume_name_dict[pr.volume_number] = filename

        return True
Example #34
 def error_handler(exc, path, filename):
     fullpath = os.path.join(path.name, filename)
     try:
         mode = os.stat(fullpath)[stat.ST_MODE]
         if stat.S_ISSOCK(mode):
             log.Info(_(u"Skipping socket %s") % util.fsdecode(fullpath),
                      log.InfoCode.skipping_socket,
                      util.escape(fullpath))
         else:
             log.Warn(_(u"Error initializing file %s") % util.fsdecode(fullpath),
                      log.WarningCode.cannot_iterate,
                      util.escape(fullpath))
     except OSError:
         log.Warn(_(u"Error accessing possibly locked file %s") % util.fsdecode(fullpath),
                  log.WarningCode.cannot_stat, util.escape(fullpath))
     return None
Example #35
    def mktemp(self):
        """
        Return a unique filename suitable for use as a temporary
        file. The file is not created.

        Subsequent calls to this method are guaranteed to never return
        the same filename again. As a result, it is safe to use under
        concurrent conditions.

        NOTE: mkstemp() is greatly preferred.
        """
        filename = None

        self.__lock.acquire()
        try:
            self.__tempcount = self.__tempcount + 1
            suffix = "-%d" % (self.__tempcount,)
            filename = tempfile.mktemp(suffix, "mktemp-", self.__dir)

            log.Debug(_("Registering (mktemp) temporary file %s") % util.fsdecode(filename))
            self.__pending[filename] = None
        finally:
            self.__lock.release()

        return filename
Example #36
 def _query(self, filename):
     from gi.repository import Gio  # @UnresolvedImport  # pylint: disable=import-error
     target_file = self.remote_file.get_child_for_display_name(
         util.fsdecode(filename))
     info = target_file.query_info(Gio.FILE_ATTRIBUTE_STANDARD_SIZE,
                                   Gio.FileQueryInfoFlags.NONE, None)
     return {u'size': info.get_size()}
Example #37
 def _delete(self, remote_filename):
     remote_filename = util.fsdecode(remote_filename)
     commandline = u"%s deletefile --drive-use-trash=false %s/%s" % (
         self.rclone_cmd, self.remote_path, remote_filename)
     rc, o, e = self._subprocess_safe_popen(commandline)
     if rc != 0:
         raise BackendException(e)
Example #38
 def _get(self, remote_filename, local_path):
     url = self.directory + util.fsdecode(remote_filename)
     response = None
     try:
         target_file = local_path.open(u"wb")
         response = self.request(u"GET", url)
         if response.status == 200:
             # data=response.read()
             shutil.copyfileobj(response, target_file)
             # import hashlib
             # log.Info("WebDAV GOT %s bytes with md5=%s" %
             # (len(data),hashlib.md5(data).hexdigest()) )
             assert not target_file.close()
             response.close()
         else:
             status = response.status
             reason = response.reason
             response.close()
             raise BackendException(_(u"WebDAV GET Bad status code %s reason %s.") %
                                    (status, reason))
     finally:
         if response:
             response.close()
Example #39
 def can_fast_process(self, index, ropath):
     """Can fast process (no recursion) if ropath isn't a directory"""
     log.Info(_("Writing %s of type %s") %
              (util.fsdecode(ropath.get_relative_path()), ropath.type),
              log.InfoCode.patch_file_writing,
              "%s %s" % (util.escape(ropath.get_relative_path()), ropath.type))
     return not ropath.isdir()
Example #40
    def __init__(self, temproot=None):
        """
        Create a new TemporaryDirectory backed by a unique and
        securely created file system directory.

        temproot - The temp root directory, or None to use system
        default (recommended).
        """
        if temproot is None:
            if globals.temproot:
                temproot = globals.temproot
            else:
                global _initialSystemTempRoot
                temproot = _initialSystemTempRoot
        self.__dir = tempfile.mkdtemp("-tempdir", "duplicity-", temproot)

        log.Info(_("Using temporary directory %s") % util.fsdecode(self.__dir))

        # number of mktemp()/mkstemp() calls served so far
        self.__tempcount = 0
        # dict of paths pending deletion; use dict even though we are
        # not concerned with association, because it is unclear whether
        # sets are O(1), while dictionaries are.
        self.__pending = {}

        self.__lock = threading.Lock()  # protect private resources *AND* mktemp/mkstemp calls
Example #41
    def get_backup_chains(self, filename_list):
        """
        Split given filename_list into chains

        Return value will be tuple (list of chains, list of sets, list
        of incomplete sets), where the list of sets will comprise sets
        not fitting into any chain, and the incomplete sets are sets
        missing files.
        """
        log.Debug(_("Extracting backup chains from list of files: %s")
                  % [util.fsdecode(f) for f in filename_list])
        # First put filenames in set form
        sets = []

        def add_to_sets(filename):
            """
            Try adding filename to existing sets, or make new one
            """
            for set in sets:
                if set.add_filename(filename):
                    log.Debug(_("File %s is part of known set") % (util.fsdecode(filename),))
                    break
            else:
                log.Debug(_("File %s is not part of a known set; creating new set") % (util.fsdecode(filename),))
                new_set = BackupSet(self.backend, self.action)
                if new_set.add_filename(filename):
                    sets.append(new_set)
                else:
                    log.Debug(_("Ignoring file (rejected by backup set) '%s'") % util.fsdecode(filename))

        for f in filename_list:
            add_to_sets(f)
        sets, incomplete_sets = self.get_sorted_sets(sets)

        chains, orphaned_sets = [], []

        def add_to_chains(set):
            """
            Try adding set to existing chains, or make new one
            """
            if set.type == "full":
                new_chain = BackupChain(self.backend)
                new_chain.set_full(set)
                chains.append(new_chain)
                log.Debug(_("Found backup chain %s") % (new_chain.short_desc()))
            else:
                assert set.type == "inc"
                for chain in chains:
                    if chain.add_inc(set):
                        log.Debug(_("Added set %s to pre-existing chain %s") % (set.get_timestr(),
                                                                                chain.short_desc()))
                        break
                else:
                    log.Debug(_("Found orphaned set %s") % (set.get_timestr(),))
                    orphaned_sets.append(set)
        for s in sets:
            add_to_chains(s)
        return (chains, orphaned_sets, incomplete_sets)
Example #42
 def log_prev_error(self, index):
     """Call function if no pending exception"""
     if not index:
         index_str = "."
     else:
         index_str = os.path.join(*index)
     log.Warn(_("Skipping %s because of previous error") % util.fsdecode(index_str),
              log.WarningCode.process_skipped,
              util.escape(index_str))
Example #43
def log_delta_path(delta_path, new_path=None, stats=None):
    """
    Look at delta path and log delta.  Add stats if new_path is set
    """
    if delta_path.difftype == "snapshot":
        if new_path and stats:
            stats.add_new_file(new_path)
        log.Info(_("A %s") %
                 (util.fsdecode(delta_path.get_relative_path())),
                 log.InfoCode.diff_file_new,
                 util.escape(delta_path.get_relative_path()))
    else:
        if new_path and stats:
            stats.add_changed_file(new_path)
        log.Info(_("M %s") %
                 (util.fsdecode(delta_path.get_relative_path())),
                 log.InfoCode.diff_file_changed,
                 util.escape(delta_path.get_relative_path()))
Example #44
 def on_error(self, exc, *args):
     """This is run on any exception in start/end-process"""
     self.caught_exception = 1
     if args and args[0] and isinstance(args[0], tuple):
         filename = os.path.join(*args[0])
     elif self.index:
         filename = os.path.join(*self.index)  # pylint: disable=not-an-iterable
     else:
         filename = "."
     log.Warn(_("Error '%s' processing %s") % (exc, util.fsdecode(filename)),
              log.WarningCode.cannot_process,
              util.escape(filename))
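The index values seen here (and in Example #42 above) are tuples of path components; joining them back into a display path is just:

    import os

    print(os.path.join(*("home", "user", "file.txt")))  # home/user/file.txt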
Example #45
def delta_iter_error_handler(exc, new_path, sig_path, sig_tar=None):
    """
    Called by get_delta_iter, report error in getting delta
    """
    if new_path:
        index_string = new_path.get_relative_path()
    elif sig_path:
        index_string = sig_path.get_relative_path()
    else:
        assert 0, "Both new and sig are None for some reason"
    log.Warn(_("Error %s getting delta for %s") % (str(exc), util.fsdecode(index_string)))
    return None
Example #46
def get_delta_iter(new_iter, sig_iter, sig_fileobj=None):
    """
    Generate delta iter from new Path iter and sig Path iter.

    For each delta path of regular file type, path.difftype will be
    set to "snapshot" or "diff".  sig_iter will probably iterate ROPaths
    instead of Paths.

    If sig_fileobj is not None, will also write signatures to sig_fileobj.
    """
    collated = collate2iters(new_iter, sig_iter)
    if sig_fileobj:
        sigTarFile = util.make_tarfile("w", sig_fileobj)
    else:
        sigTarFile = None
    for new_path, sig_path in collated:
        log.Debug(_("Comparing %s and %s") % (new_path and util.uindex(new_path.index),
                                              sig_path and util.uindex(sig_path.index)))
        if not new_path or not new_path.type:
            # File doesn't exist (but ignore attempts to delete base dir;
            # old versions of duplicity could have written out the sigtar in
            # such a way as to fool us; LP: #929067)
            if sig_path and sig_path.exists() and sig_path.index != ():
                # but signature says it did
                log.Info(_("D %s") %
                         (util.fsdecode(sig_path.get_relative_path())),
                         log.InfoCode.diff_file_deleted,
                         util.escape(sig_path.get_relative_path()))
                if sigTarFile:
                    ti = ROPath(sig_path.index).get_tarinfo()
                    ti.name = "deleted/" + "/".join(sig_path.index)
                    sigTarFile.addfile(ti)
                stats.add_deleted_file(sig_path)
                yield ROPath(sig_path.index)
        elif not sig_path or new_path != sig_path:
            # Must calculate new signature and create delta
            delta_path = robust.check_common_error(delta_iter_error_handler,
                                                   get_delta_path,
                                                   (new_path, sig_path, sigTarFile))
            if delta_path:
                # log and collect stats
                log_delta_path(delta_path, new_path, stats)
                yield delta_path
            else:
                # if not, an error must have occurred
                stats.Errors += 1
        else:
            stats.add_unchanged_file(new_path)
    stats.close()
    if sigTarFile:
        sigTarFile.close()
Example #47
def get_index_from_tarinfo(tarinfo):
    """Return (index, difftype, multivol) pair from tarinfo object"""
    for prefix in ["snapshot/", "diff/", "deleted/",
                   "multivol_diff/", "multivol_snapshot/"]:
        tiname = util.get_tarinfo_name(tarinfo)
        if tiname.startswith(prefix):
            name = tiname[len(prefix):]  # strip prefix
            if prefix.startswith("multivol"):
                if prefix == "multivol_diff/":
                    difftype = "diff"
                else:
                    difftype = "snapshot"
                multivol = 1
                name, num_subs = \
                    re.subn("(?s)^multivol_(diff|snapshot)/?(.*)/[0-9]+$",
                            "\\2", tiname)
                if num_subs != 1:
                    raise PatchDirException(u"Unrecognized diff entry %s" %
                                            util.fsdecode(tiname))
            else:
                difftype = prefix[:-1]  # strip trailing /
                name = tiname[len(prefix):]
                if name.endswith("/"):
                    name = name[:-1]  # strip trailing /'s
                multivol = 0
            break
    else:
        raise PatchDirException(u"Unrecognized diff entry %s" %
                                util.fsdecode(tiname))
    if name == "." or name == "":
        index = ()
    else:
        index = tuple(name.split("/"))
        if '..' in index:
            raise PatchDirException(u"Tar entry %s contains '..'.  Security "
                                    "violation" % util.fsdecode(tiname))
    return (index, difftype, multivol)
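A quick sketch of the multivol substitution above, run on a hypothetical tar entry name:

    import re

    # "multivol_diff/<path>/<volume number>" collapses to just "<path>"
    name, num_subs = re.subn("(?s)^multivol_(diff|snapshot)/?(.*)/[0-9]+$",
                             "\\2", "multivol_diff/home/user/file/7")
    print(name, num_subs)  # home/user/file 1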
Example #48
    def mkstemp(self):
        """
        Returns a file descriptor and a filename, as per os.mkstemp(),
        but located in the temporary directory and subject to tracking
        and automatic cleanup.
        """
        fd = None
        filename = None

        self.__lock.acquire()
        try:
            self.__tempcount = self.__tempcount + 1
            suffix = "-%d" % (self.__tempcount,)
            fd, filename = tempfile.mkstemp(suffix, "mkstemp-", self.__dir)

            log.Debug(_("Registering (mkstemp) temporary file %s") % util.fsdecode(filename))
            self.__pending[filename] = None
        finally:
            self.__lock.release()

        return fd, filename
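A minimal usage sketch, assuming an already-constructed TemporaryDirectory instance named tmpdir (hypothetical); the caller owns the returned descriptor and should forget() the name once the file is gone:

    import os

    fd, fname = tmpdir.mkstemp()    # tracked: a later cleanup() would remove it
    with os.fdopen(fd, "wb") as f:
        f.write(b"scratch data")
    os.unlink(fname)
    tmpdir.forget(fname)            # already deleted; stop cleanup() from trying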
Example #49
    def __init__(self, base, index=()):
        """Path initializer"""
        # self.opened should be true if the file has been opened, and
        # self.fileobj can override returned fileobj
        self.opened, self.fileobj = None, None
        if isinstance(base, unicode):
            # For now (Python 2), it is helpful to know that all paths
            # are starting with bytes -- see note above util.fsencode definition
            base = util.fsencode(base)
        self.base = base

        # Create self.index, which is the path as a tuple
        self.index = self.rename_index(index)

        self.name = os.path.join(base, *self.index)

        # We converted any unicode base to filesystem encoding, so self.name should
        # be in filesystem encoding already and does not need to change
        self.uc_name = util.fsdecode(self.name)

        self.setdata()
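duplicity's util.fsencode/util.fsdecode are not shown in this listing; assuming they behave like the standard os.fsencode/os.fsdecode pair, this is the bytes-in, text-out convention the initializer relies on:

    import os

    raw = b"backup/file-1.difftar.gpg"  # bytes path, as names are stored internally
    text = os.fsdecode(raw)             # text, safe to interpolate into log messages
    assert os.fsencode(text) == raw     # the round-trip is lossless (surrogateescape)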
Example #50
def integrate_patch_iters(iter_list):
    """Combine a list of iterators of ropath patches

    The iter_list should be sorted in patch order, and the elements in
    each iterator need to be ordered by index.  The output will be an
    iterator of the final ROPaths in index order.

    """
    collated = collate_iters(iter_list)
    for patch_seq in collated:
        normalized = normalize_ps(patch_seq)
        try:
            final_ropath = patch_seq2ropath(normalized)
            if final_ropath.exists():
                # otherwise final patch was delete
                yield final_ropath
        except Exception as e:
            filename = normalized[-1].get_ropath().get_relative_path()
            log.Warn(_("Error '%s' patching %s") %
                     (util.uexc(e), util.fsdecode(filename)),
                     log.WarningCode.cannot_process,
                     util.escape(filename))
Example #51
    def forget(self, fname):
        """
        Forget about the given filename previously obtained through
        mktemp() or mkstemp(). This should be called *after* the file
        has been deleted, to stop a future cleanup() from trying to
        delete it.

        Forgetting is only needed for scaling purposes; that is, to
        avoid n tempfile creations from implying that n filenames are
        kept in memory. Typically this would never matter in
        duplicity, but for niceness' sake callers are recommended to
        use this method whenever possible.
        """
        self.__lock.acquire()
        try:
            if fname in self.__pending:
                log.Debug(_("Forgetting temporary file %s") % util.fsdecode(fname))
                del(self.__pending[fname])
            else:
                log.Warn(_("Attempt to forget unknown tempfile %s - this is probably a bug.") % util.fsdecode(fname))
                pass
        finally:
            self.__lock.release()
Example #52
 def add_selection(o, option, additional_arg, p):
     select_opts.append((util.fsdecode(option), util.fsdecode(additional_arg)))
Example #53
 def log_diff(log_string):
     log_str = _("Difference found:") + u" " + log_string
     log.Notice(log_str % (util.fsdecode(self.get_relative_path())))
Example #54
    def get_tarinfo(self):
        """Generate a tarfile.TarInfo object based on self

        Doesn't set size based on stat, because we may want to replace
        the data with another stream.  Size should be set separately by
        the calling function.

        """
        ti = tarfile.TarInfo()
        if self.index:
            ti.name = "/".join(self.index)
        else:
            ti.name = "."
        if self.isdir():
            ti.name += "/"  # tar dir naming convention

        ti.size = 0
        if self.type:
            # Lots of this is specific to tarfile.py, hope it doesn't
            # change much...
            if self.isreg():
                ti.type = tarfile.REGTYPE
                ti.size = self.stat.st_size
            elif self.isdir():
                ti.type = tarfile.DIRTYPE
            elif self.isfifo():
                ti.type = tarfile.FIFOTYPE
            elif self.issym():
                ti.type = tarfile.SYMTYPE
                ti.linkname = self.symtext
            elif self.isdev():
                if self.type == "chr":
                    ti.type = tarfile.CHRTYPE
                else:
                    ti.type = tarfile.BLKTYPE
                ti.devmajor, ti.devminor = self.devnums
            else:
                raise PathException("Unrecognized type " + str(self.type))

            ti.mode = self.mode
            ti.uid, ti.gid = self.stat.st_uid, self.stat.st_gid
            if self.stat.st_mtime < 0:
                log.Warn(_("Warning: %s has negative mtime, treating as 0.")
                         % (util.fsdecode(self.get_relative_path())))
                ti.mtime = 0
            else:
                ti.mtime = int(self.stat.st_mtime)

            try:
                ti.uname = cached_ops.getpwuid(ti.uid)[0]
            except KeyError:
                ti.uname = ''
            try:
                ti.gname = cached_ops.getgrgid(ti.gid)[0]
            except KeyError:
                ti.gname = ''

            if ti.type in (tarfile.CHRTYPE, tarfile.BLKTYPE):
                if hasattr(os, "major") and hasattr(os, "minor"):
                    ti.devmajor, ti.devminor = self.devnums
        else:
            # Currently we depend on an uninitialized tarinfo file to
            # already have appropriate headers.  Still, might as well
            # make sure mode and size are set.
            ti.mode, ti.size = 0, 0
        return ti
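To show where such a TarInfo ends up, here is a minimal self-contained sketch (not duplicity code) that sets the size separately, as the docstring above requires, and writes the entry with tarfile.addfile:

    import io
    import tarfile

    data = b"hello"
    ti = tarfile.TarInfo(name="snapshot/home/user/hello.txt")
    ti.type = tarfile.REGTYPE
    ti.size = len(data)                   # size must be set by the caller
    ti.mtime = 0

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tf:
        tf.addfile(ti, io.BytesIO(data))  # data stream supplied alongside the header
    print(len(buf.getvalue()))            # a complete in-memory tar archive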