Example #1
 def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
     if self.linux_attr:
         if not set_linux_file_attr:
             add_error("%s: can't restore linuxattrs: "
                       "linuxattr support missing.\n" % path)
             return
         set_linux_file_attr(path, self.linux_attr)
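
All of these snippets rely on bup's error-accumulation convention: add_error() records a problem and lets the operation continue, and the caller decides at the end whether the run failed. A minimal sketch of that convention (names match bup's helpers module, but the bodies here are simplified assumptions, not the actual implementation):

import sys

saved_errors = []

def log(s):
    sys.stderr.write(s)

def add_error(e):
    # Remember the error for a final summary instead of aborting the
    # whole run on the first failure.
    saved_errors.append(e)
    log('%s\n' % e)

def die_if_errors(msg=None):
    # Callers like bup_rm() (Example #9) bail out here before doing
    # anything destructive.
    if saved_errors:
        if msg:
            log(msg)
        sys.exit(1)
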
Example #2
def _recursive_dirlist(prepend, xdev, bup_dir=None,
                       excluded_paths=None,
                       exclude_rxs=None,
                       xdev_exceptions=frozenset()):
    for (name,pst) in _dirlist():
        path = prepend + name
        if excluded_paths:
            if os.path.normpath(path) in excluded_paths:
                debug1('Skipping %r: excluded.\n' % path)
                continue
        if exclude_rxs and should_rx_exclude_path(path, exclude_rxs):
            continue
        if name.endswith('/'):
            if bup_dir != None:
                if os.path.normpath(path) == bup_dir:
                    debug1('Skipping BUP_DIR.\n')
                    continue
            if xdev != None and pst.st_dev != xdev \
               and path not in xdev_exceptions:
                debug1('Skipping contents of %r: different filesystem.\n' % path)
            else:
                try:
                    OsFile(name).fchdir()
                except OSError as e:
                    add_error('%s: %s' % (prepend, e))
                else:
                    for i in _recursive_dirlist(prepend=prepend+name, xdev=xdev,
                                                bup_dir=bup_dir,
                                                excluded_paths=excluded_paths,
                                                exclude_rxs=exclude_rxs,
                                                xdev_exceptions=xdev_exceptions):
                        yield i
                    os.chdir('..')
        yield (path, pst)
Example #3
 def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
     if not xattr:
         if self.linux_xattr:
             add_error("%s: can't restore xattr; xattr support missing.\n"
                       % path)
         return
     existing_xattrs = set(xattr.list(path, nofollow=True))
     if self.linux_xattr:
         for k, v in self.linux_xattr:
             if k not in existing_xattrs \
                     or v != xattr.get(path, k, nofollow=True):
                 try:
                     xattr.set(path, k, v, nofollow=True)
                 except IOError, e:
                     if e.errno == errno.EPERM \
                             or e.errno == errno.EOPNOTSUPP:
                         raise ApplyError('xattr.set: %s' % e)
                     else:
                         raise
             existing_xattrs -= frozenset([k])
         for k in existing_xattrs:
             try:
                 xattr.remove(path, k, nofollow=True)
             except IOError, e:
                 if e.errno == errno.EPERM:
                     raise ApplyError('xattr.remove: %s' % e)
                 else:
                     raise
Example #4
def _recursive_dirlist(prepend, xdev, bup_dir=None, excluded_paths=None, exclude_rxs=None):
    for (name, pst) in _dirlist():
        path = prepend + name
        if excluded_paths:
            if os.path.normpath(path) in excluded_paths:
                debug1("Skipping %r: excluded.\n" % path)
                continue
        if exclude_rxs and should_rx_exclude_path(path, exclude_rxs):
            continue
        if name.endswith("/"):
            if bup_dir != None:
                if os.path.normpath(path) == bup_dir:
                    debug1("Skipping BUP_DIR.\n")
                    continue
            if xdev != None and pst.st_dev != xdev:
                debug1("Skipping contents of %r: different filesystem.\n" % path)
            else:
                try:
                    OsFile(name).fchdir()
                except OSError as e:
                    add_error("%s: %s" % (prepend, e))
                else:
                    for i in _recursive_dirlist(
                        prepend=prepend + name,
                        xdev=xdev,
                        bup_dir=bup_dir,
                        excluded_paths=excluded_paths,
                        exclude_rxs=exclude_rxs,
                    ):
                        yield i
                    os.chdir("..")
        yield (path, pst)
Example #5
def extract(file, restore_numeric_ids=False, create_symlinks=True):
    # For now, just store all the directories and handle them last,
    # longest first.
    all_dirs = []
    for meta in _ArchiveIterator(file):
        if not meta:  # Hit end record.
            break
        xpath = _clean_up_extract_path(meta.path)
        if not xpath:
            add_error(Exception('skipping risky path "%s"' % meta.path))
        else:
            meta.path = xpath
            if verbose:
                print >> sys.stderr, "+", meta.path
            _set_up_path(meta, create_symlinks=create_symlinks)
            if os.path.isdir(meta.path):
                all_dirs.append(meta)
            else:
                if verbose:
                    print >> sys.stderr, "=", meta.path
                meta.apply_to_path(restore_numeric_ids=restore_numeric_ids)
    all_dirs.sort(key=lambda x: len(x.path), reverse=True)
    for dir in all_dirs:
        # Don't need to check xpath -- won't be in all_dirs if not OK.
        xpath = _clean_up_extract_path(dir.path)
        if verbose:
            print >> sys.stderr, "=", xpath
        # Shouldn't have to check for risky paths here (omitted above).
        dir.apply_to_path(path=dir.path, restore_numeric_ids=restore_numeric_ids)
Example #6
def ruin_bloom(bloomfilename):
    rbloomfilename = git.repo_rel(bloomfilename)
    if not os.path.exists(bloomfilename):
        log("%s\n" % bloomfilename)
        add_error("bloom: %s not found to ruin\n" % rbloomfilename)
        return
    b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
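    # Presumably bup's bloom file layout: a 16-byte header followed by the
    # bit table of 2**bits bytes; zeroing the whole table makes every
    # membership test fail, deliberately "ruining" the filter.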
    b.map[16:16+2**b.bits] = '\0' * 2**b.bits
Example #7
def start_extract(file, create_symlinks=True):
    for meta in _ArchiveIterator(file):
        if verbose:
            print >> sys.stderr, meta.path
        xpath = _clean_up_extract_path(meta.path)
        if not xpath:
            add_error(Exception('skipping risky path "%s"' % meta.path))
        else:
            meta.path = xpath
            _set_up_path(meta, create_symlinks=create_symlinks)
Example #8
 def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
     if self.linux_attr:
         if not set_linux_file_attr:
             add_error("%s: can't restore linuxattrs: " "linuxattr support missing.\n" % path)
             return
         try:
             set_linux_file_attr(path, self.linux_attr)
         except OSError, e:
             if e.errno in (errno.ENOTTY, errno.EOPNOTSUPP, errno.ENOSYS, errno.EACCES):
                 raise ApplyError("Linux chattr: %s (0x%s)" % (e, hex(self.linux_attr)))
             else:
                 raise
Example #9
File: rm.py Project: kronenpj/bup
def bup_rm(paths, compression=6, verbosity=None):
    root = vfs.RefList(None)

    dead_branches, dead_saves = dead_items(root, paths)
    die_if_errors('not proceeding with any removals\n')

    updated_refs = {}  # ref_name -> (original_ref, tip_commit(bin))

    for branch, node in dead_branches.iteritems():
        ref = 'refs/heads/' + branch
    assert ref not in updated_refs
        updated_refs[ref] = (node.hash, None)

    if dead_saves:
        writer = git.PackWriter(compression_level=compression)
        try:
            for branch, saves in dead_saves.iteritems():
                assert(saves)
                updated_refs['refs/heads/' + branch] = rm_saves(saves, writer)
        except:
            if writer:
                writer.abort()
            raise
        else:
            if writer:
                # Must close before we can update the ref(s) below.
                writer.close()

    # Only update the refs here, at the very end, so that if something
    # goes wrong above, the old refs will be undisturbed.  Make an attempt
    # to update each ref.
    for ref_name, info in updated_refs.iteritems():
        orig_ref, new_ref = info
        try:
            if not new_ref:
                git.delete_ref(ref_name, orig_ref.encode('hex'))
            else:
                git.update_ref(ref_name, new_ref, orig_ref)
                if verbosity:
                    new_hex = new_ref.encode('hex')
                    if orig_ref:
                        orig_hex = orig_ref.encode('hex')
                        log('updated %r (%s -> %s)\n'
                            % (ref_name, orig_hex, new_hex))
                    else:
                        log('updated %r (%s)\n' % (ref_name, new_hex))
        except (git.GitError, ClientError) as ex:
            if new_ref:
                add_error('while trying to update %r (%s -> %s): %s'
                          % (ref_name, orig_ref, new_ref, ex))
            else:
                add_error('while trying to delete %r (%s): %s'
                          % (ref_name, orig_ref, ex))
Example #10
 def _add_linux_attr(self, path, st):
     if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
         try:
             attr = get_linux_file_attr(path)
             if attr != 0:
                 self.linux_attr = attr
         except IOError, e:
             if e.errno == errno.EACCES:
                 add_error('read Linux attr: %s' % e)
             elif e.errno == errno.ENOTTY: # Inappropriate ioctl for device.
                 add_error('read Linux attr: %s' % e)
             else:
                 raise
Example #11
File: index.py Project: bup/bup
def _slashappend_or_add_error(p, caller):
    """Return p, after ensuring it has a single trailing slash if it names
    a directory, unless there's an OSError, in which case, call
    add_error() and return None."""
    try:
        st = os.lstat(p)
    except OSError as e:
        add_error('%s: %s' % (caller, e))
        return None
    else:
        if stat.S_ISDIR(st.st_mode):
            return slashappend(p)
        return p
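
For reference, the slashappend() helper used above behaves roughly like this (a sketch; bup's actual helper is equivalent in effect):

def slashappend(s):
    # Append exactly one trailing slash to a non-empty string that
    # doesn't already end with one.
    if s and not s.endswith('/'):
        s += '/'
    return s
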
Example #12
def _dirlist():
    l = []
    for n in os.listdir("."):
        try:
            st = xstat.lstat(n)
        except OSError as e:
            add_error(Exception("%s: %s" % (resolve_parent(n), str(e))))
            continue
        if (st.st_mode & _IFMT) == stat.S_IFDIR:
            n += "/"
        l.append((n, st))
    l.sort(reverse=True)
    return l
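
The trailing slash added to directory names here is what _recursive_dirlist() (Examples #2 and #4) checks with name.endswith('/') to decide whether to recurse; the reverse sort appears to match the reverse-sorted order in which bup's index is written.
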
Example #13
 def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
     if self.linux_attr:
         if not set_linux_file_attr:
             add_error("%s: can't restore linuxattrs: "
                       "linuxattr support missing.\n" % path)
             return
         try:
             set_linux_file_attr(path, self.linux_attr)
         except OSError, e:
             if e.errno == errno.ENOTTY:
                 raise ApplyError('Linux chattr: %s' % e)
             else:
                 raise
Example #14
 def _add_common(self, path, st):
     self.mode = st.st_mode
     self.uid = st.st_uid
     self.gid = st.st_gid
     self.rdev = st.st_rdev
     self.atime = st.st_atime
     self.mtime = st.st_mtime
     self.ctime = st.st_ctime
     self.owner = self.group = ''
     try:
         self.owner = pwd.getpwuid(st.st_uid)[0]
     except KeyError, e:
         add_error("no user name for id %s '%s'" % (st.st_uid, path))
Example #15
 def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
     if not xattr:
         if self.linux_xattr:
             add_error("%s: can't restore xattr; xattr support missing.\n" % path)
         return
     if not self.linux_xattr:
         return
     try:
         existing_xattrs = set(xattr.list(path, nofollow=True))
     except IOError, e:
         if e.errno == errno.EACCES:
             raise ApplyError("xattr.list: %s" % e)
         else:
             raise
Example #16
 def read_ids():
     while 1:
         line = sys.stdin.readline()
         if not line:
             break
         if line:
             line = line.strip()
         try:
             it = cp.get(line.strip())
             next(it, None)  # skip the file type
         except KeyError as e:
             add_error('error: %s' % e)
             continue
         yield IterToFile(it)
Example #17
def check_midx(name):
    nicename = git.repo_rel(name)
    log('Checking %s.\n' % nicename)
    try:
        ix = git.open_idx(name)
    except git.GitError as e:
        add_error('%s: %s' % (name, e))
        return
    for count,subname in enumerate(ix.idxnames):
        sub = git.open_idx(os.path.join(os.path.dirname(name), subname))
        for ecount,e in enumerate(sub):
            if not (ecount % 1234):
                qprogress('  %d/%d: %s %d/%d\r' 
                          % (count, len(ix.idxnames),
                             git.shorten_hash(subname), ecount, len(sub)))
            if not sub.exists(e):
                add_error("%s: %s: %s missing from idx"
                          % (nicename, git.shorten_hash(subname),
                             str(e).encode('hex')))
            if not ix.exists(e):
                add_error("%s: %s: %s missing from midx"
                          % (nicename, git.shorten_hash(subname),
                             str(e).encode('hex')))
    prev = None
    for ecount,e in enumerate(ix):
        if not (ecount % 1234):
            qprogress('  Ordering: %d/%d\r' % (ecount, len(ix)))
        if not e >= prev:
            add_error('%s: ordering error: %s < %s'
                      % (nicename,
                         str(e).encode('hex'), str(prev).encode('hex')))
        prev = e
Example #18
 def apply_to_path(self, path=None, restore_numeric_ids=False):
     # apply metadata to path -- file must exist
     if not path:
         path = self.path
     if not path:
         raise Exception('Metadata.apply_to_path() called with no path')
     num_ids = restore_numeric_ids
     try:
         self._apply_common_rec(path, restore_numeric_ids=num_ids)
         self._apply_posix1e_acl_rec(path, restore_numeric_ids=num_ids)
         self._apply_linux_attr_rec(path, restore_numeric_ids=num_ids)
         self._apply_linux_xattr_rec(path, restore_numeric_ids=num_ids)
     except ApplyError, e:
         add_error(e)
Example #19
 def _add_linux_attr(self, path, st):
     if not get_linux_file_attr: return
     if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
         try:
             attr = get_linux_file_attr(path)
             if attr != 0:
                 self.linux_attr = attr
         except OSError, e:
             if e.errno == errno.EACCES:
                 add_error('read Linux attr: %s' % e)
             elif e.errno in (errno.ENOTTY, errno.ENOSYS, errno.EOPNOTSUPP):
                 # Assume filesystem doesn't support attrs.
                 return
             else:
                 raise
Example #20
 def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False):
     if not posix1e:
         if self.posix1e_acl:
             add_error("%s: can't restore ACLs; posix1e support missing.\n" % path)
         return
     if self.posix1e_acl:
         acls = self.posix1e_acl
         if len(acls) > 2:
             if restore_numeric_ids:
                 acls[3].applyto(path, posix1e.ACL_TYPE_DEFAULT)
             else:
                 acls[2].applyto(path, posix1e.ACL_TYPE_DEFAULT)
         if restore_numeric_ids:
             acls[1].applyto(path, posix1e.ACL_TYPE_ACCESS)
         else:
             acls[0].applyto(path, posix1e.ACL_TYPE_ACCESS)
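
The indexing above suggests that self.posix1e_acl stores up to four entries: the access ACL with symbolic ids (acls[0]) and with numeric ids (acls[1]), plus, for directories only, the default ACL in the same two forms (acls[2] and acls[3]); hence the len(acls) > 2 guard before ACL_TYPE_DEFAULT is applied. Example #29 shows a later variant of the same routine with error handling around applyto().
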
Example #21
 def _add_linux_attr(self, path, st):
     if not get_linux_file_attr: return
     if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
         try:
             attr = get_linux_file_attr(path)
             if attr != 0:
                 self.linux_attr = attr
         except OSError, e:
             if e.errno == errno.EACCES:
                 add_error('read Linux attr: %s' % e)
             elif e.errno == errno.ENOTTY or e.errno == errno.ENOSYS:
                 # ENOTTY: Inappropriate ioctl for device.
                 # ENOSYS: Function not implemented.
                 # Assume filesystem doesn't support attrs.
                 return
             else:
                 raise
Example #22
def recursive_dirlist(paths, xdev, bup_dir=None,
                      excluded_paths=None,
                      exclude_rxs=None,
                      xdev_exceptions=frozenset()):
    startdir = OsFile('.')
    try:
        assert(type(paths) != type(''))
        for path in paths:
            try:
                pst = xstat.lstat(path)
                if stat.S_ISLNK(pst.st_mode):
                    yield (path, pst)
                    continue
            except OSError as e:
                add_error('recursive_dirlist: %s' % e)
                continue
            try:
                pfile = OsFile(path)
            except OSError as e:
                add_error(e)
                continue
            pst = pfile.stat()
            if xdev:
                xdev = pst.st_dev
            else:
                xdev = None
            if stat.S_ISDIR(pst.st_mode):
                pfile.fchdir()
                prepend = os.path.join(path, '')
                for i in _recursive_dirlist(prepend=prepend, xdev=xdev,
                                            bup_dir=bup_dir,
                                            excluded_paths=excluded_paths,
                                            exclude_rxs=exclude_rxs,
                                            xdev_exceptions=xdev_exceptions):
                    yield i
                startdir.fchdir()
            else:
                prepend = path
            yield (prepend,pst)
    except:
        try:
            startdir.fchdir()
        except:
            pass
        raise
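
A minimal way to drive this generator (hypothetical path; Python 2 to match the snippets above):

# Walk one tree, staying on its filesystem; failures along the way are
# collected through add_error() rather than raised.
for path, st in recursive_dirlist(['/etc/'], xdev=True):
    print path
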
Example #23
 def apply_to_path(self, path=None, restore_numeric_ids=False):
     # apply metadata to path -- file must exist
     if not path:
         path = self.path
     if not path:
         raise Exception('Metadata.apply_to_path() called with no path')
     if not self._recognized_file_type():
         add_error('not applying metadata to "%s"' % path
                   + ' with unrecognized mode "0x%x"\n' % self.mode)
         return
     num_ids = restore_numeric_ids
     try:
         self._apply_common_rec(path, restore_numeric_ids=num_ids)
         self._apply_posix1e_acl_rec(path, restore_numeric_ids=num_ids)
         self._apply_linux_attr_rec(path, restore_numeric_ids=num_ids)
         self._apply_linux_xattr_rec(path, restore_numeric_ids=num_ids)
     except ApplyError, e:
         add_error(e)
Example #24
 def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
     if self.linux_attr:
         check_linux_file_attr_api()
         if not set_linux_file_attr:
             add_error("%s: can't restore linuxattrs: "
                       "linuxattr support missing.\n" % path)
             return
         try:
             set_linux_file_attr(path, self.linux_attr)
         except OSError as e:
             if e.errno in (EACCES, ENOTTY, EOPNOTSUPP, ENOSYS):
                 raise ApplyError('Linux chattr: %s (0x%s)'
                                  % (e, hex(self.linux_attr)))
             elif e.errno == EINVAL:
                 msg = "if you're not using ntfs-3g, please report"
                 raise ApplyError('Linux chattr: %s (0x%s) (%s)'
                                  % (e, hex(self.linux_attr), msg))
             else:
                 raise
Example #25
def finish_extract(file, restore_numeric_ids=False):
    all_dirs = []
    for meta in _ArchiveIterator(file):
        xpath = _clean_up_extract_path(meta.path)
        if not xpath:
            add_error(Exception('skipping risky path "%s"' % meta.path))
        else:
            if os.path.isdir(meta.path):
                all_dirs.append(meta)
            else:
                if verbose:
                    print >> sys.stderr, meta.path
                meta.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids)
    all_dirs.sort(key=lambda x: len(x.path), reverse=True)
    for dir in all_dirs:
        # Don't need to check xpath -- won't be in all_dirs if not OK.
        xpath = _clean_up_extract_path(dir.path)
        if verbose:
            print >> sys.stderr, dir.path
        dir.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids)
Example #26
def check_bloom(path, bloomfilename, idx):
    rbloomfilename = git.repo_rel(bloomfilename)
    ridx = git.repo_rel(idx)
    if not os.path.exists(bloomfilename):
        log("bloom: %s: does not exist.\n" % rbloomfilename)
        return
    b = bloom.ShaBloom(bloomfilename)
    if not b.valid():
        add_error("bloom: %r is invalid.\n" % rbloomfilename)
        return
    base = os.path.basename(idx)
    if base not in b.idxnames:
        log("bloom: %s does not contain the idx.\n" % rbloomfilename)
        return
    if base == idx:
        idx = os.path.join(path, idx)
    log("bloom: bloom file: %s\n" % rbloomfilename)
    log("bloom:   checking %s\n" % ridx)
    for objsha in git.open_idx(idx):
        if not b.exists(objsha):
            add_error("bloom: ERROR: object %s missing" % str(objsha).encode("hex"))
Example #27
File: rm.py Project: kronenpj/bup
def dead_items(vfs_top, paths):
    """Return an optimized set of removals, reporting errors via
    add_error, and if there are any errors, return None, None."""
    dead_branches = {}
    dead_saves = {}
    # Scan for bad requests, and opportunities to optimize
    for path in paths:
        try:
            n = vfs_top.lresolve(path)
        except vfs.NodeError as e:
            add_error('unable to resolve %s: %s' % (path, e))
        else:
            if isinstance(n, vfs.BranchList): # rm /foo
                branchname = n.name
                dead_branches[branchname] = n
                dead_saves.pop(branchname, None) # rm /foo obviates rm /foo/bar
            elif isinstance(n, vfs.FakeSymlink) and isinstance(n.parent,
                                                               vfs.BranchList):
                if n.name == 'latest':
                    add_error("error: cannot delete 'latest' symlink")
                else:
                    branchname = n.parent.name
                    if branchname not in dead_branches:
                        dead_saves.setdefault(branchname, []).append(n)
            else:
                add_error("don't know how to remove %r yet" % n.fullname())
    if saved_errors:
        return None, None
    return dead_branches, dead_saves
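
Note that dead_items() never raises for a bad path: it reports through add_error() and returns (None, None) if the shared saved_errors list is non-empty, which is what lets bup_rm() in Example #9 call die_if_errors() once, before any refs are touched.
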
Example #28
def _pop(force_tree, dir_metadata=None):
    # Leave the current archive directory and add its tree to its parent.
    assert(len(parts) >= 1)
    part = parts.pop()
    shalist = shalists.pop()
    metalist = metalists.pop()
    if metalist and not force_tree:
        if dir_metadata: # Override the original metadata pushed for this dir.
            metalist = [('', dir_metadata)] + metalist[1:]
        sorted_metalist = sorted(metalist, key=lambda x: x[0])
        metadata = ''.join([m[1].encode() for m in sorted_metalist])
        metadata_f = BytesIO(metadata)
        mode, id = hashsplit.split_to_blob_or_tree(w.new_blob, w.new_tree,
                                                   [metadata_f],
                                                   keep_boundaries=False)
        shalist.append((mode, '.bupm', id))
    # FIXME: only test if collision is possible (i.e. given --strip, etc.)?
    if force_tree:
        tree = force_tree
    else:
        names_seen = set()
        clean_list = []
        for x in shalist:
            name = x[1]
            if name in names_seen:
                parent_path = '/'.join(parts) + '/'
                add_error('error: ignoring duplicate path %r in %r'
                          % (name, parent_path))
            else:
                names_seen.add(name)
                clean_list.append(x)
        tree = w.new_tree(clean_list)
    if shalists:
        shalists[-1].append((GIT_MODE_TREE,
                             git.mangle_name(part,
                                             GIT_MODE_TREE, GIT_MODE_TREE),
                             tree))
    return tree
Example #29
    def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False):
        def apply_acl(acl_rep, kind):
            try:
                acl = posix1e.ACL(text = acl_rep)
            except IOError as e:
                if e.errno == 0:
                    # pylibacl appears to return an IOError with errno
                    # set to 0 if a group referred to by the ACL rep
                    # doesn't exist on the current system.
                    raise ApplyError("POSIX1e ACL: can't create %r for %r"
                                     % (acl_rep, path))
                else:
                    raise
            try:
                acl.applyto(path, kind)
            except IOError as e:
                if e.errno == errno.EPERM or e.errno == errno.EOPNOTSUPP:
                    raise ApplyError('POSIX1e ACL applyto: %s' % e)
                else:
                    raise

        if not posix1e:
            if self.posix1e_acl:
                add_error("%s: can't restore ACLs; posix1e support missing.\n"
                          % path)
            return
        if self.posix1e_acl:
            acls = self.posix1e_acl
            if len(acls) > 2:
                if restore_numeric_ids:
                    apply_acl(acls[3], posix1e.ACL_TYPE_DEFAULT)
                else:
                    apply_acl(acls[2], posix1e.ACL_TYPE_DEFAULT)
            if restore_numeric_ids:
                apply_acl(acls[1], posix1e.ACL_TYPE_ACCESS)
            else:
                apply_acl(acls[0], posix1e.ACL_TYPE_ACCESS)
Example #30
def reduce_paths(paths):
    xpaths = []
    for p in paths:
        rp = resolve_parent(p)
        try:
            st = os.lstat(rp)
            if stat.S_ISDIR(st.st_mode):
                rp = slashappend(rp)
                p = slashappend(p)
            xpaths.append((rp, p))
        except OSError as e:
            add_error('reduce_paths: %s' % e)
    xpaths.sort()

    paths = []
    prev = None
    for (rp, p) in xpaths:
        if prev and (prev == rp 
                     or (prev.endswith('/') and rp.startswith(prev))):
            continue # already superseded by previous path
        paths.append((rp, p))
        prev = rp
    paths.sort(reverse=True)
    return paths
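
A quick illustration of the pruning, assuming both paths exist and are directories (hypothetical layout):

# '/home' gains a trailing slash, so it covers '/home/me/' and the
# nested entry is dropped as redundant.
reduce_paths(['/home/me', '/home'])   # -> [('/home/', '/home/')]
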
Example #31
def update_index(top,
                 excluded_paths,
                 exclude_rxs,
                 indexfile,
                 check=False,
                 check_device=True,
                 xdev=False,
                 xdev_exceptions=frozenset(),
                 fake_valid=False,
                 fake_invalid=False,
                 out=None,
                 verbose=0):
    # tmax must be epoch nanoseconds.
    tmax = (time.time() - 1) * 10**9

    with index.MetaStoreWriter(indexfile + b'.meta') as msw, \
         hlinkdb.HLinkDB(indexfile + b'.hlink') as hlinks, \
         index.Writer(indexfile, msw, tmax) as wi, \
         index.Reader(indexfile) as ri:

        rig = IterHelper(ri.iter(name=top))

        fake_hash = None
        if fake_valid:

            def fake_hash(name):
                return (GIT_MODE_FILE, index.FAKE_SHA)

        total = 0
        bup_dir = os.path.abspath(git.repo())
        index_start = time.time()
        for path, pst in recursive_dirlist([top],
                                           xdev=xdev,
                                           bup_dir=bup_dir,
                                           excluded_paths=excluded_paths,
                                           exclude_rxs=exclude_rxs,
                                           xdev_exceptions=xdev_exceptions):
            if verbose >= 2 or (verbose == 1 and stat.S_ISDIR(pst.st_mode)):
                out.write(b'%s\n' % path)
                out.flush()
                elapsed = time.time() - index_start
                paths_per_sec = total / elapsed if elapsed else 0
                qprogress('Indexing: %d (%d paths/s)\r' %
                          (total, paths_per_sec))
            elif not (total % 128):
                elapsed = time.time() - index_start
                paths_per_sec = total / elapsed if elapsed else 0
                qprogress('Indexing: %d (%d paths/s)\r' %
                          (total, paths_per_sec))
            total += 1

            while rig.cur and rig.cur.name > path:  # deleted paths
                if rig.cur.exists():
                    rig.cur.set_deleted()
                    rig.cur.repack()
                    if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode):
                        hlinks.del_path(rig.cur.name)
                rig.next()

            if rig.cur and rig.cur.name == path:  # paths that already existed
                need_repack = False
                if (rig.cur.stale(pst, check_device=check_device)):
                    try:
                        meta = metadata.from_path(path, statinfo=pst)
                    except (OSError, IOError) as e:
                        add_error(e)
                        rig.next()
                        continue
                    if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1:
                        hlinks.del_path(rig.cur.name)
                    if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                        hlinks.add_path(path, pst.st_dev, pst.st_ino)
                    # Clear these so they don't bloat the store -- they're
                    # already in the index (since they vary a lot and they're
                    # fixed length).  If you've noticed "tmax", you might
                    # wonder why it's OK to do this, since that code may
                    # adjust (mangle) the index mtime and ctime -- producing
                    # fake values which must not end up in a .bupm.  However,
                    # it looks like that shouldn't be possible:  (1) When
                    # "save" validates the index entry, it always reads the
                    # metadata from the filesystem. (2) Metadata is only
                    # read/used from the index if hashvalid is true. (3)
                    # "faked" entries will be stale(), and so we'll invalidate
                    # them below.
                    meta.ctime = meta.mtime = meta.atime = 0
                    meta_ofs = msw.store(meta)
                    rig.cur.update_from_stat(pst, meta_ofs)
                    rig.cur.invalidate()
                    need_repack = True
                if not (rig.cur.flags & index.IX_HASHVALID):
                    if fake_hash:
                        if rig.cur.sha == index.EMPTY_SHA:
                            rig.cur.gitmode, rig.cur.sha = fake_hash(path)
                        rig.cur.flags |= index.IX_HASHVALID
                        need_repack = True
                if fake_invalid:
                    rig.cur.invalidate()
                    need_repack = True
                if need_repack:
                    rig.cur.repack()
                rig.next()
            else:  # new paths
                try:
                    meta = metadata.from_path(path, statinfo=pst)
                except (OSError, IOError) as e:
                    add_error(e)
                    continue
                # See same assignment to 0, above, for rationale.
                meta.atime = meta.mtime = meta.ctime = 0
                meta_ofs = msw.store(meta)
                wi.add(path, pst, meta_ofs, hashgen=fake_hash)
                if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                    hlinks.add_path(path, pst.st_dev, pst.st_ino)

        elapsed = time.time() - index_start
        paths_per_sec = total / elapsed if elapsed else 0
        progress('Indexing: %d, done (%d paths/s).\n' % (total, paths_per_sec))

        hlinks.prepare_save()

        if not ri.exists():
            wi.close()
        else:
            ri.save()
            wi.flush()
            if wi.count:
                with wi.new_reader() as wr:
                    if check:
                        log('check: before merging: oldfile\n')
                        check_index(ri, verbose)
                        log('check: before merging: newfile\n')
                        check_index(wr, verbose)
                    with index.Writer(indexfile, msw, tmax) as mi:
                        for e in index.merge(ri, wr):
                            # FIXME: shouldn't we remove deleted entries
                            # eventually?  When?
                            mi.add_ixentry(e)
                        mi.close()

        hlinks.commit_save()
Example #32
            os.mknod(path, 0600 | stat.S_IFSOCK)
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0777) ^ 0777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))

    def _apply_common_rec(self, path, restore_numeric_ids=False):
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        # EACCES errors at this stage are fatal for the current path.
        if lutime and stat.S_ISLNK(self.mode):
            try:
                lutime(path, (self.atime, self.mtime))
            except OSError, e:
                if e.errno == errno.EACCES:
                    raise ApplyError('lutime: %s' % e)
                else:
                    raise
        else:
            try:
                utime(path, (self.atime, self.mtime))
Example #33
    elif first_root != dirp[0]:
        root_collision = True

    # If switching to a new sub-tree, finish the current sub-tree.
    while parts > [x[0] for x in dirp]:
        _pop(force_tree = None)

    # If switching to a new sub-tree, start a new sub-tree.
    for path_component in dirp[len(parts):]:
        dir_name, fs_path = path_component
        # Not indexed, so just grab the FS metadata or use empty metadata.
        try:
            meta = metadata.from_path(fs_path, normalized=True) \
                if fs_path else metadata.Metadata()
        except (OSError, IOError) as e:
            add_error(e)
            lastskip_name = dir_name
            meta = metadata.Metadata()
        _push(dir_name, meta)

    if not file:
        if len(parts) == 1:
            continue # We're at the top level -- keep the current root dir
        # Since there's no filename, this is a subdir -- finish it.
        oldtree = already_saved(ent) # may be None
        newtree = _pop(force_tree = oldtree)
        if not oldtree:
            if lastskip_name and lastskip_name.startswith(ent.name):
                ent.invalidate()
            else:
                ent.validate(GIT_MODE_TREE, newtree)
Example #34
        if treat_include_fields_as_definitive:
            active_fields = include_fields
            treat_include_fields_as_definitive = False
        else:
            active_fields = active_fields | include_fields

opt.verbose = opt.verbose or 0
opt.quiet = opt.quiet or 0
metadata.verbose = opt.verbose - opt.quiet

for path in remainder:
    try:
        m = metadata.from_path(path, archive_path = path)
    except (OSError,IOError), e:
        if e.errno == errno.ENOENT:
            add_error(e)
            continue
        else:
            raise
    if 'path' in active_fields:
        print 'path:', m.path
    if 'mode' in active_fields:
        print 'mode:', oct(m.mode)
    if 'link-target' in active_fields and stat.S_ISLNK(m.mode):
        print 'link-target:', m.symlink_target
    if 'rdev' in active_fields:
        print 'rdev:', m.rdev
    if 'uid' in active_fields:
        print 'uid:', m.uid
    if 'gid' in active_fields:
        print 'gid:', m.gid
Example #35
def main(argv):

    target_filename = b''
    active_fields = metadata.all_fields

    o = options.Options(optspec)
    (opt, flags, remainder) = o.parse_bytes(argv[1:])

    atime_resolution = parse_timestamp_arg(o, 'atime', opt.atime_resolution)
    mtime_resolution = parse_timestamp_arg(o, 'mtime', opt.mtime_resolution)
    ctime_resolution = parse_timestamp_arg(o, 'ctime', opt.ctime_resolution)

    treat_include_fields_as_definitive = True
    for flag, value in flags:
        if flag == '--exclude-fields':
            exclude_fields = frozenset(value.split(','))
            for f in exclude_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            active_fields = active_fields - exclude_fields
            treat_include_fields_as_definitive = False
        elif flag == '--include-fields':
            include_fields = frozenset(value.split(','))
            for f in include_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            if treat_include_fields_as_definitive:
                active_fields = include_fields
                treat_include_fields_as_definitive = False
            else:
                active_fields = active_fields | include_fields

    opt.verbose = opt.verbose or 0
    opt.quiet = opt.quiet or 0
    metadata.verbose = opt.verbose - opt.quiet

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    first_path = True
    for path in remainder:
        path = argv_bytes(path)
        try:
            m = metadata.from_path(path, archive_path = path)
        except (OSError,IOError) as e:
            if e.errno == errno.ENOENT:
                add_error(e)
                continue
            else:
                raise
        if metadata.verbose >= 0:
            if not first_path:
                out.write(b'\n')
            if atime_resolution != 1:
                m.atime = (m.atime / atime_resolution) * atime_resolution
            if mtime_resolution != 1:
                m.mtime = (m.mtime / mtime_resolution) * mtime_resolution
            if ctime_resolution != 1:
                m.ctime = (m.ctime / ctime_resolution) * ctime_resolution
            out.write(metadata.detailed_bytes(m, active_fields))
            out.write(b'\n')
            first_path = False

    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
    else:
        sys.exit(0)
Example #36
                s.bind(path)
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0777) ^ 0777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))

    def _apply_common_rec(self, path, restore_numeric_ids=False):
        if not self.mode:
            raise ApplyError('no metadata - cannot apply to ' + path)

        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        # EACCES errors at this stage are fatal for the current path.
        if lutime and stat.S_ISLNK(self.mode):
            try:
                lutime(path, (self.atime, self.mtime))
            except OSError, e:
                if e.errno == errno.EACCES:
                    raise ApplyError('lutime: %s' % e)
                else:
                    raise
Example #37
    def _create_via_common_rec(self, path, create_symlinks=True):
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' + path)

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        msg = 'refusing to overwrite non-empty dir ' + path
                        raise Exception(msg)
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert (self._recognized_file_type())
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert (self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))
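
The S_ISSOCK branch above is worth noting: on platforms where mknod() refuses to create socket nodes (reporting EINVAL or EPERM), binding an AF_UNIX socket to the path creates the filesystem entry instead, which is all the restore needs.
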
Example #38
def main():
    handle_ctrl_c()
    is_reverse = os.environ.get('BUP_SERVER_REVERSE')
    opt = parse_args(sys.argv)
    git.check_repo_or_die()
    src_dir = opt.source or git.repo()
    if opt.bwlimit:
        client.bwlimit = parse_num(opt.bwlimit)
    if is_reverse and opt.remote:
        misuse("don't use -r in reverse mode; it's automatic")
    if opt.remote or is_reverse:
        dest_repo = RemoteRepo(opt.remote)
    else:
        dest_repo = LocalRepo()

    with dest_repo as dest_repo:
        with LocalRepo(repo_dir=src_dir) as src_repo:
            with dest_repo.new_packwriter(
                    compression_level=opt.compress) as writer:
                # Resolve and validate all sources and destinations,
                # implicit or explicit, and do it up-front, so we can
                # fail before we start writing (for any obviously
                # broken cases).
                target_items = resolve_targets(opt.target_specs, src_repo,
                                               dest_repo)

                updated_refs = {}  # ref_name -> (original_ref, tip_commit(bin))
                no_ref_info = (None, None)

                handlers = {
                    'ff': handle_ff,
                    'append': handle_append,
                    'force-pick': handle_pick,
                    'pick': handle_pick,
                    'new-tag': handle_new_tag,
                    'replace': handle_replace,
                    'unnamed': handle_unnamed
                }

                for item in target_items:
                    debug1('get-spec: %s\n' % str(item.spec))
                    debug1('get-src: %s\n' % loc_desc(item.src))
                    debug1('get-dest: %s\n' % loc_desc(item.dest))
                    dest_path = item.dest and item.dest.path
                    if dest_path:
                        if dest_path.startswith('/.tag/'):
                            dest_ref = 'refs/tags/%s' % dest_path[6:]
                        else:
                            dest_ref = 'refs/heads/%s' % dest_path[1:]
                    else:
                        dest_ref = None

                    dest_hash = item.dest and item.dest.hash
                    orig_ref, cur_ref = updated_refs.get(dest_ref, no_ref_info)
                    orig_ref = orig_ref or dest_hash
                    cur_ref = cur_ref or dest_hash

                    handler = handlers[item.spec.method]
                    item_result = handler(item, src_repo, writer, opt)
                    if len(item_result) > 1:
                        new_id, tree = item_result
                    else:
                        new_id = item_result[0]

                    if not dest_ref:
                        log_item(item.spec.src, item.src.type, opt)
                    else:
                        updated_refs[dest_ref] = (orig_ref, new_id)
                        if dest_ref.startswith('refs/tags/'):
                            log_item(item.spec.src,
                                     item.src.type,
                                     opt,
                                     tag=new_id)
                        else:
                            log_item(item.spec.src,
                                     item.src.type,
                                     opt,
                                     tree=tree,
                                     commit=new_id)

        # Only update the refs at the very end, once the writer is
        # closed, so that if something goes wrong above, the old refs
        # will be undisturbed.
        for ref_name, info in updated_refs.iteritems():
            orig_ref, new_ref = info
            try:
                dest_repo.update_ref(ref_name, new_ref, orig_ref)
                if opt.verbose:
                    new_hex = new_ref.encode('hex')
                    if orig_ref:
                        orig_hex = orig_ref.encode('hex')
                        log('updated %r (%s -> %s)\n' %
                            (ref_name, orig_hex, new_hex))
                    else:
                        log('updated %r (%s)\n' % (ref_name, new_hex))
            except (git.GitError, client.ClientError), ex:
                add_error('unable to update ref %r: %s' % (ref_name, ex))
Example #39
def main(argv):
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    verbosity = (opt.verbose or 0) if not opt.quiet else -1
    if opt.outdir:
        opt.outdir = argv_bytes(opt.outdir)

    if not extra:
        o.fatal('must specify at least one filename to restore')

    exclude_rxs = parse_rx_excludes(flags, o.fatal)

    owner_map = {}
    for map_type in ('user', 'group', 'uid', 'gid'):
        owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal)

    if opt.outdir:
        mkdirp(opt.outdir)
        os.chdir(opt.outdir)

    repo = from_opts(opt, reverse=False)
    top = fsencode(os.getcwd())
    hardlinks = {}
    for path in [argv_bytes(x) for x in extra]:
        if not valid_restore_path(path):
            add_error("path %r doesn't include a branch and revision" % path)
            continue
        try:
            resolved = vfs.resolve(repo, path, want_meta=True, follow=False)
        except vfs.IOError as e:
            add_error(e)
            continue
        if len(resolved) == 3 and resolved[2][0] == b'latest':
            # Follow latest symlink to the actual save
            try:
                resolved = vfs.resolve(repo,
                                       b'latest',
                                       parent=resolved[:-1],
                                       want_meta=True)
            except vfs.IOError as e:
                add_error(e)
                continue
            # Rename it back to 'latest'
            resolved = tuple(elt if i != 2 else (b'latest', ) + elt[1:]
                             for i, elt in enumerate(resolved))
        path_parent, path_name = os.path.split(path)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            add_error('error: cannot access %r in %r' %
                      (b'/'.join(name for name, item in resolved), path))
            continue
        if not path_name or path_name == b'.':
            # Source is /foo/what/ever/ or /foo/what/ever/. -- extract
            # what/ever/* to the current directory, and if name == '.'
            # (i.e. /foo/what/ever/.), then also restore what/ever's
            # metadata to the current directory.
            treeish = vfs.item_mode(leaf_item)
            if not treeish:
                add_error('%r cannot be restored as a directory' % path)
            else:
                items = vfs.contents(repo, leaf_item, want_meta=True)
                dot, leaf_item = next(items, None)
                assert dot == b'.'
                for sub_name, sub_item in items:
                    restore(repo, b'', sub_name, sub_item, top, opt.sparse,
                            opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                            hardlinks)
                if path_name == b'.':
                    leaf_item = vfs.augment_item_meta(repo,
                                                      leaf_item,
                                                      include_size=True)
                    apply_metadata(leaf_item.meta, b'.', opt.numeric_ids,
                                   owner_map)
        else:
            restore(repo, b'', leaf_name, leaf_item, top, opt.sparse,
                    opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                    hardlinks)

    if verbosity >= 0:
        progress('Restoring: %d, done.\n' % total_restored)
    die_if_errors()
Example #40
    def _apply_common_rec(self, path, restore_numeric_ids=False):
        if not self.mode:
            raise ApplyError('no metadata - cannot apply to ' + path)

        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        # EACCES errors at this stage are fatal for the current path.
        if lutime and stat.S_ISLNK(self.mode):
            try:
                lutime(path, (self.atime, self.mtime))
            except OSError as e:
                if e.errno == errno.EACCES:
                    raise ApplyError('lutime: %s' % e)
                else:
                    raise
        else:
            try:
                utime(path, (self.atime, self.mtime))
            except OSError as e:
                if e.errno == errno.EACCES:
                    raise ApplyError('utime: %s' % e)
                else:
                    raise

        uid = gid = -1  # By default, do nothing.
        if is_superuser():
            uid = self.uid
            gid = self.gid
            if not restore_numeric_ids:
                if self.uid != 0 and self.user:
                    entry = pwd_from_name(self.user)
                    if entry:
                        uid = entry.pw_uid
                if self.gid != 0 and self.group:
                    entry = grp_from_name(self.group)
                    if entry:
                        gid = entry.gr_gid
        else:  # not superuser - only consider changing the group/gid
            user_gids = os.getgroups()
            if self.gid in user_gids:
                gid = self.gid
            if not restore_numeric_ids and self.gid != 0:
                # The grp might not exist on the local system.
                grps = filter(None, [grp_from_gid(x) for x in user_gids])
                if self.group in [x.gr_name for x in grps]:
                    g = grp_from_name(self.group)
                    if g:
                        gid = g.gr_gid

        if uid != -1 or gid != -1:
            try:
                os.lchown(path, uid, gid)
            except OSError as e:
                if e.errno == errno.EPERM:
                    add_error('lchown: %s' % e)
                elif sys.platform.startswith('cygwin') \
                   and e.errno == errno.EINVAL:
                    add_error('lchown: unknown uid/gid (%d/%d) for %s' %
                              (uid, gid, path))
                else:
                    raise

        if _have_lchmod:
            try:
                os.lchmod(path, stat.S_IMODE(self.mode))
            except OSError as e:
                if e.errno != errno.ENOSYS:  # ENOSYS: function not implemented
                    raise
        elif not stat.S_ISLNK(self.mode):
            os.chmod(path, stat.S_IMODE(self.mode))
Example #41
def main():
    handle_ctrl_c()
    opt = parse_args(sys.argv)
    if opt.source:
        opt.source = argv_bytes(opt.source)
    src_dir = opt.source or git.repo()
    if opt.bwlimit:
        client.bwlimit = parse_num(opt.bwlimit)
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    dest_repo = repo.from_opts(opt)

    with dest_repo as dest_repo:
        with LocalRepo(repo_dir=src_dir) as src_repo:
            # Resolve and validate all sources and destinations,
            # implicit or explicit, and do it up-front, so we can
            # fail before we start writing (for any obviously
            # broken cases).
            target_items = resolve_targets(opt.target_specs, src_repo,
                                           dest_repo)

            updated_refs = {}  # ref_name -> (original_ref, tip_commit(bin))
            no_ref_info = (None, None)

            handlers = {
                'ff': handle_ff,
                'append': handle_append,
                'force-pick': handle_pick,
                'pick': handle_pick,
                'new-tag': handle_new_tag,
                'replace': handle_replace,
                'unnamed': handle_unnamed
            }

            for item in target_items:
                debug1('get-spec: %r\n' % (item.spec, ))
                debug1('get-src: %s\n' % loc_desc(item.src))
                debug1('get-dest: %s\n' % loc_desc(item.dest))
                dest_path = item.dest and item.dest.path
                if dest_path:
                    if dest_path.startswith(b'/.tag/'):
                        dest_ref = b'refs/tags/%s' % dest_path[6:]
                    else:
                        dest_ref = b'refs/heads/%s' % dest_path[1:]
                else:
                    dest_ref = None

                dest_hash = item.dest and item.dest.hash
                orig_ref, cur_ref = updated_refs.get(dest_ref, no_ref_info)
                orig_ref = orig_ref or dest_hash
                cur_ref = cur_ref or dest_hash

                handler = handlers[item.spec.method]
                item_result = handler(item, src_repo, dest_repo, opt)
                if len(item_result) > 1:
                    new_id, tree = item_result
                else:
                    new_id = item_result[0]

                if not dest_ref:
                    log_item(item.spec.src, item.src.type, opt)
                else:
                    updated_refs[dest_ref] = (orig_ref, new_id)
                    if dest_ref.startswith(b'refs/tags/'):
                        log_item(item.spec.src, item.src.type, opt, tag=new_id)
                    else:
                        log_item(item.spec.src,
                                 item.src.type,
                                 opt,
                                 tree=tree,
                                 commit=new_id)

        # Only update the refs at the very end, once the destination repo
        # has finished writing, so that if something goes wrong above, the
        # old refs are left undisturbed.
        for ref_name, info in items(updated_refs):
            orig_ref, new_ref = info
            try:
                dest_repo.update_ref(ref_name, new_ref, orig_ref)
                if opt.verbose:
                    new_hex = hexlify(new_ref)
                    if orig_ref:
                        orig_hex = hexlify(orig_ref)
                        log('updated %r (%s -> %s)\n' %
                            (ref_name, orig_hex, new_hex))
                    else:
                        log('updated %r (%s)\n' % (ref_name, new_hex))
            except (git.GitError, client.ClientError) as ex:
                add_error('unable to update ref %r: %s' % (ref_name, ex))

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
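
The loop above deliberately defers every ref update until the destination
repository has finished writing, and it passes orig_ref so each update is
effectively a compare-and-swap. A hedged sketch of the same idea with plain
git (the helper below is ours, not part of bup):

import subprocess

def update_ref_cas(git_dir, ref_name, new_hex, old_hex=None):
    # git update-ref refuses the update if the ref no longer points at
    # old_hex, so a concurrent writer can't be silently overwritten.
    cmd = ['git', '--git-dir', git_dir, 'update-ref', ref_name, new_hex]
    if old_hex:
        cmd.append(old_hex)
    subprocess.check_call(cmd)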
Example #42
def main():
    o = options.Options(optspec)
    opt, flags, extra = o.parse(sys.argv[1:])
    verbosity = opt.verbose if not opt.quiet else -1

    git.check_repo_or_die()

    if not extra:
        o.fatal('must specify at least one filename to restore')

    exclude_rxs = parse_rx_excludes(flags, o.fatal)

    owner_map = {}
    for map_type in ('user', 'group', 'uid', 'gid'):
        owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal)

    if opt.outdir:
        mkdirp(opt.outdir)
        os.chdir(opt.outdir)

    repo = RemoteRepo(opt.remote) if opt.remote else LocalRepo()
    top = os.getcwd()
    hardlinks = {}
    for path in extra:
        if not valid_restore_path(path):
            add_error("path %r doesn't include a branch and revision" % path)
            continue
        try:
            resolved = vfs.lresolve(repo, path, want_meta=True)
        except vfs.IOError as e:
            add_error(e)
            continue
        path_parent, path_name = os.path.split(path)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            add_error('error: cannot access %r in %r' %
                      ('/'.join(name for name, item in resolved), path))
            continue
        if not path_name or path_name == '.':
            # Source is /foo/what/ever/ or /foo/what/ever/. -- extract
            # what/ever/* to the current directory, and if name == '.'
            # (i.e. /foo/what/ever/.), then also restore what/ever's
            # metadata to the current directory.
            treeish = vfs.item_mode(leaf_item)
            if not treeish or not stat.S_ISDIR(treeish):
                add_error('%r cannot be restored as a directory' % path)
            else:
                items = vfs.contents(repo, leaf_item, want_meta=True)
                dot, leaf_item = next(items, None)
                assert (dot == '.')
                for sub_name, sub_item in items:
                    restore(repo, '', sub_name, sub_item, top, opt.sparse,
                            opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                            hardlinks)
                if path_name == '.':
                    leaf_item = vfs.augment_item_meta(repo,
                                                      leaf_item,
                                                      include_size=True)
                    apply_metadata(leaf_item.meta, '.', opt.numeric_ids,
                                   owner_map)
        else:
            restore(repo, '', leaf_name, leaf_item, top, opt.sparse,
                    opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                    hardlinks)

    if verbosity >= 0:
        progress('Restoring: %d, done.\n' % total_restored)
    die_if_errors()
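
valid_restore_path() is not shown in this excerpt; judging from the error
message, it checks that a path names at least a branch and a revision. A
hypothetical re-implementation under that assumption:

def valid_restore_path(path):
    # A restorable path looks like /branch/revision[/sub/path...],
    # e.g. /my-backups/latest/home/someuser.
    parts = [p for p in path.split('/') if p]
    return path.startswith('/') and len(parts) >= 2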
Example #43
 def _add_symlink_target(self, path, st):
     try:
         if stat.S_ISLNK(st.st_mode):
             self.symlink_target = os.readlink(path)
     except OSError as e:
         add_error('readlink: %s' % e)
Example #44
def main(argv):

    # Hack around lack of nonlocal vars in python 2
    _nonlocal = {}

    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if opt.indexfile:
        opt.indexfile = argv_bytes(opt.indexfile)
    if opt.name:
        opt.name = argv_bytes(opt.name)
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    if opt.strip_path:
        opt.strip_path = argv_bytes(opt.strip_path)

    git.check_repo_or_die()
    if not (opt.tree or opt.commit or opt.name):
        o.fatal("use one or more of -t, -c, -n")
    if not extra:
        o.fatal("no filenames given")

    extra = [argv_bytes(x) for x in extra]

    opt.progress = (istty2 and not opt.quiet)
    opt.smaller = parse_num(opt.smaller or 0)
    if opt.bwlimit:
        client.bwlimit = parse_num(opt.bwlimit)

    if opt.date:
        date = parse_date_or_fatal(opt.date, o.fatal)
    else:
        date = time.time()

    if opt.strip and opt.strip_path:
        o.fatal("--strip is incompatible with --strip-path")

    graft_points = []
    if opt.graft:
        if opt.strip:
            o.fatal("--strip is incompatible with --graft")

        if opt.strip_path:
            o.fatal("--strip-path is incompatible with --graft")

        for (option, parameter) in flags:
            if option == "--graft":
                parameter = argv_bytes(parameter)
                splitted_parameter = parameter.split(b'=')
                if len(splitted_parameter) != 2:
                    o.fatal("a graft point must be of the form old_path=new_path")
                old_path, new_path = splitted_parameter
                if not (old_path and new_path):
                    o.fatal("a graft point cannot be empty")
                graft_points.append((resolve_parent(old_path),
                                     resolve_parent(new_path)))

    is_reverse = environ.get(b'BUP_SERVER_REVERSE')
    if is_reverse and opt.remote:
        o.fatal("don't use -r in reverse mode; it's automatic")

    name = opt.name
    if name and not valid_save_name(name):
        o.fatal("'%s' is not a valid branch name" % path_msg(name))
    refname = name and b'refs/heads/%s' % name or None
    if opt.remote or is_reverse:
        try:
            cli = client.Client(opt.remote)
        except client.ClientError as e:
            log('error: %s' % e)
            sys.exit(1)
        oldref = refname and cli.read_ref(refname) or None
        w = cli.new_packwriter(compression_level=opt.compress)
    else:
        cli = None
        oldref = refname and git.read_ref(refname) or None
        w = git.PackWriter(compression_level=opt.compress)

    handle_ctrl_c()


    # Metadata is stored in a file named .bupm in each directory.  The
    # first metadata entry will be the metadata for the current directory.
    # The remaining entries will be for each of the other directory
    # elements, in the order they're listed in the index.
    #
    # Since the git tree elements are sorted according to
    # git.shalist_item_sort_key, the metalist items are accumulated as
    # (sort_key, metadata) tuples, and then sorted when the .bupm file is
    # created.  The sort_key should be computed from the element's
    # mangled name and git mode (after hashsplitting), but the code
    # doesn't actually do that; it uses the element's real name and mode
    # instead.  This makes reading the data back a bit more difficult;
    # see vfs.ordered_tree_entries().
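    # For illustration only (entries hypothetical): a directory holding
    # files 'a' and 'b' accumulates a metalist conceptually like
    #   [(b'',            <metadata for the directory itself>),
    #    (sort_key(b'a'), <metadata for a>),
    #    (sort_key(b'b'), <metadata for b>)]
    # and since the empty key sorts first, the directory's own metadata
    # always ends up as the first entry in .bupm.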

    # Maintain a stack of information representing the current location in
    # the archive being constructed.  The current path is recorded in
    # parts, which will be something like
    #      [StackDir(name=''), StackDir(name='home'), StackDir(name='someuser')],
    # and the accumulated content and metadata for files in the dirs is stored
    # in the .items member of the StackDir.

    stack = []


    def _push(part, metadata):
        # Enter a new archive directory -- make it the current directory.
        item = StackDir(part, metadata)
        stack.append(item)


    def _pop(force_tree=None, dir_metadata=None):
        # Leave the current archive directory and add its tree to its parent.
        item = stack.pop()
        # FIXME: only test if collision is possible (i.e. given --strip, etc.)?
        if force_tree:
            tree = force_tree
        else:
            names_seen = set()
            clean_list = []
            for x in item.items:
                name = x.name
                if name in names_seen:
                    parent_path = b'/'.join(x.name for x in stack) + b'/'
                    add_error('error: ignoring duplicate path %s in %s'
                              % (path_msg(name), path_msg(parent_path)))
                else:
                    names_seen.add(name)
                    clean_list.append(x)

            # if set, overrides the original metadata pushed for this dir.
            if dir_metadata is None:
                dir_metadata = item.meta
            metalist = [(b'', dir_metadata)]
            metalist += [(git.shalist_item_sort_key((entry.mode, entry.name, None)),
                          entry.meta)
                         for entry in clean_list if entry.mode != GIT_MODE_TREE]
            metalist.sort(key = lambda x: x[0])
            metadata = BytesIO(b''.join(m[1].encode() for m in metalist))
            mode, id = hashsplit.split_to_blob_or_tree(w.new_blob, w.new_tree,
                                                       [metadata],
                                                       keep_boundaries=False)
            shalist = [(mode, b'.bupm', id)]
            shalist += [(entry.gitmode,
                         git.mangle_name(entry.name, entry.mode, entry.gitmode),
                         entry.oid)
                        for entry in clean_list]

            tree = w.new_tree(shalist)
        if stack:
            stack[-1].append(item.name, GIT_MODE_TREE, GIT_MODE_TREE, tree, None)
        return tree


    _nonlocal['count'] = 0
    _nonlocal['subcount'] = 0
    _nonlocal['lastremain'] = None

    def progress_report(n):
        _nonlocal['subcount'] += n
        cc = _nonlocal['count'] + _nonlocal['subcount']
        pct = total and (cc*100.0/total) or 0
        now = time.time()
        elapsed = now - tstart
        kps = elapsed and int(cc/1024./elapsed)
        kps_frac = 10 ** int(math.log(kps+1, 10) - 1)
        kps = int(kps/kps_frac)*kps_frac
        if cc:
            remain = elapsed*1.0/cc * (total-cc)
        else:
            remain = 0.0
        if (_nonlocal['lastremain'] and (remain > _nonlocal['lastremain'])
              and ((remain - _nonlocal['lastremain'])/_nonlocal['lastremain'] < 0.05)):
            remain = _nonlocal['lastremain']
        else:
            _nonlocal['lastremain'] = remain
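        # A new, larger estimate replaces the previous one only if it grew
        # by at least 5%; this hysteresis keeps the ETA from flickering
        # upward on momentary throughput dips.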
        hours = int(remain/60/60)
        mins = int(remain/60 - hours*60)
        secs = int(remain - hours*60*60 - mins*60)
        if elapsed < 30:
            remainstr = ''
            kpsstr = ''
        else:
            kpsstr = '%dk/s' % kps
            if hours:
                remainstr = '%dh%dm' % (hours, mins)
            elif mins:
                remainstr = '%dm%d' % (mins, secs)
            else:
                remainstr = '%ds' % secs
        qprogress('Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r'
                  % (pct, cc/1024, total/1024, fcount, ftotal,
                     remainstr, kpsstr))


    indexfile = opt.indexfile or git.repo(b'bupindex')
    r = index.Reader(indexfile)
    try:
        msr = index.MetaStoreReader(indexfile + b'.meta')
    except IOError as ex:
        if ex.errno != EACCES:
            raise
        log('error: cannot access %r; have you run bup index?'
            % path_msg(indexfile))
        sys.exit(1)
    hlink_db = hlinkdb.HLinkDB(indexfile + b'.hlink')

    def already_saved(ent):
        return ent.is_valid() and w.exists(ent.sha) and ent.sha

    def wantrecurse_pre(ent):
        return not already_saved(ent)

    def wantrecurse_during(ent):
        return not already_saved(ent) or ent.sha_missing()

    def find_hardlink_target(hlink_db, ent):
        if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1:
            link_paths = hlink_db.node_paths(ent.dev, ent.ino)
            if link_paths:
                return link_paths[0]

    total = ftotal = 0
    if opt.progress:
        for (transname,ent) in r.filter(extra, wantrecurse=wantrecurse_pre):
            if not (ftotal % 10024):
                qprogress('Reading index: %d\r' % ftotal)
            exists = ent.exists()
            hashvalid = already_saved(ent)
            ent.set_sha_missing(not hashvalid)
            if not opt.smaller or ent.size < opt.smaller:
                if exists and not hashvalid:
                    total += ent.size
            ftotal += 1
        progress('Reading index: %d, done.\n' % ftotal)
        hashsplit.progress_callback = progress_report

    # Root collisions occur when strip or graft options map more than one
    # path to the same directory (paths which originally had separate
    # parents).  When that situation is detected, use empty metadata for
    # the parent.  Otherwise, use the metadata for the common parent.
    # Collision example: "bup save ... --strip /foo /foo/bar /bar".

    # FIXME: Add collision tests, or handle collisions some other way.

    # FIXME: Detect/handle strip/graft name collisions (other than root),
    # i.e. if '/foo/bar' and '/bar' both map to '/'.

    first_root = None
    root_collision = None
    tstart = time.time()
    fcount = 0
    lastskip_name = None
    lastdir = b''
    for (transname,ent) in r.filter(extra, wantrecurse=wantrecurse_during):
        (dir, file) = os.path.split(ent.name)
        exists = (ent.flags & index.IX_EXISTS)
        hashvalid = already_saved(ent)
        wasmissing = ent.sha_missing()
        oldsize = ent.size
        if opt.verbose:
            if not exists:
                status = 'D'
            elif not hashvalid:
                if ent.sha == index.EMPTY_SHA:
                    status = 'A'
                else:
                    status = 'M'
            else:
                status = ' '
            if opt.verbose >= 2:
                log('%s %-70s\n' % (status, path_msg(ent.name)))
            elif not stat.S_ISDIR(ent.mode) and lastdir != dir:
                if not lastdir.startswith(dir):
                    log('%s %-70s\n' % (status, path_msg(os.path.join(dir, b''))))
                lastdir = dir

        if opt.progress:
            progress_report(0)
        fcount += 1

        if not exists:
            continue
        if opt.smaller and ent.size >= opt.smaller:
            if exists and not hashvalid:
                if opt.verbose:
                    log('skipping large file "%s"\n' % path_msg(ent.name))
                lastskip_name = ent.name
            continue

        assert(dir.startswith(b'/'))
        if opt.strip:
            dirp = stripped_path_components(dir, extra)
        elif opt.strip_path:
            dirp = stripped_path_components(dir, [opt.strip_path])
        elif graft_points:
            dirp = grafted_path_components(graft_points, dir)
        else:
            dirp = path_components(dir)

        # At this point, dirp contains a representation of the archive
        # path that looks like [(archive_dir_name, real_fs_path), ...].
        # So given "bup save ... --strip /foo/bar /foo/bar/baz", dirp
        # might look like this at some point:
        #   [('', '/foo/bar'), ('baz', '/foo/bar/baz'), ...].

        # This dual representation supports stripping/grafting, where the
        # archive path may not have a direct correspondence with the
        # filesystem.  The root directory is represented by an initial
        # component named '', and any component that doesn't have a
        # corresponding filesystem directory (due to grafting, for
        # example) will have a real_fs_path of None, i.e. [('', None),
        # ...].

        if first_root == None:
            first_root = dirp[0]
        elif first_root != dirp[0]:
            root_collision = True

        # If switching to a new sub-tree, finish the current sub-tree.
        while [x.name for x in stack] > [x[0] for x in dirp]:
            _pop()

        # If switching to a new sub-tree, start a new sub-tree.
        for path_component in dirp[len(stack):]:
            dir_name, fs_path = path_component
            # Not indexed, so just grab the FS metadata or use empty metadata.
            try:
                meta = metadata.from_path(fs_path, normalized=True) \
                    if fs_path else metadata.Metadata()
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = dir_name
                meta = metadata.Metadata()
            _push(dir_name, meta)

        if not file:
            if len(stack) == 1:
                continue # We're at the top level -- keep the current root dir
            # Since there's no filename, this is a subdir -- finish it.
            oldtree = already_saved(ent) # may be None
            newtree = _pop(force_tree = oldtree)
            if not oldtree:
                if lastskip_name and lastskip_name.startswith(ent.name):
                    ent.invalidate()
                else:
                    ent.validate(GIT_MODE_TREE, newtree)
                ent.repack()
            if exists and wasmissing:
                _nonlocal['count'] += oldsize
            continue

        # it's not a directory
        if hashvalid:
            meta = msr.metadata_at(ent.meta_ofs)
            meta.hardlink_target = find_hardlink_target(hlink_db, ent)
            # Restore the times that were cleared to 0 in the metastore.
            (meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime, ent.ctime)
            stack[-1].append(file, ent.mode, ent.gitmode, ent.sha, meta)
        else:
            id = None
            hlink = find_hardlink_target(hlink_db, ent)
            try:
                meta = metadata.from_path(ent.name, hardlink_target=hlink,
                                          normalized=True,
                                          after_stat=after_nondir_metadata_stat)
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = ent.name
                continue
            if stat.S_IFMT(ent.mode) != stat.S_IFMT(meta.mode):
                # The mode changed since we indexed the file; this is bad.
                # This can cause two issues:
                # 1) We e.g. think the file is a regular file, but now it's
                #    something else (a device, socket, FIFO or symlink, etc.)
                #    and _read_ from it when we shouldn't.
                # 2) We then record it as valid, but don't update the index
                #    metadata, and on a subsequent save it has 'hashvalid'
                #    but is recorded as the file type from the index, when
                #    the content is something else ...
                # Avoid all of these consistency issues by just skipping such
                # things - it really ought to not happen anyway.
                add_error("%s: mode changed since indexing, skipping." % path_msg(ent.name))
                lastskip_name = ent.name
                continue
            if stat.S_ISREG(ent.mode):
                try:
                    # If the file changes while we're reading it, our read
                    # may stop early, but the stat() above may already have
                    # seen a different size. Recalculate meta.size as we
                    # read so the repository records the size matching the
                    # stored content, even if the other stat() data is
                    # slightly older than that content (we can't fix that;
                    # it's inherently racy, but we can at least prevent a
                    # size mismatch).
                    meta.size = 0
                    def new_blob(data):
                        meta.size += len(data)
                        return w.new_blob(data)
                    before_saving_regular_file(ent.name)
                    with hashsplit.open_noatime(ent.name) as f:
                        (mode, id) = hashsplit.split_to_blob_or_tree(
                                                new_blob, w.new_tree, [f],
                                                keep_boundaries=False)
                except (IOError, OSError) as e:
                    add_error('%s: %s' % (ent.name, e))
                    lastskip_name = ent.name
            elif stat.S_ISDIR(ent.mode):
                assert(0)  # handled above
            elif stat.S_ISLNK(ent.mode):
                mode, id = (GIT_MODE_SYMLINK, w.new_blob(meta.symlink_target))
            else:
                # Everything else should be fully described by its
                # metadata, so just record an empty blob, so the paths
                # in the tree and .bupm will match up.
                (mode, id) = (GIT_MODE_FILE, w.new_blob(b''))

            if id:
                ent.validate(mode, id)
                ent.repack()
                stack[-1].append(file, ent.mode, ent.gitmode, id, meta)

        if exists and wasmissing:
            _nonlocal['count'] += oldsize
            _nonlocal['subcount'] = 0


    if opt.progress:
        pct = total and _nonlocal['count']*100.0/total or 100
        progress('Saving: %.2f%% (%d/%dk, %d/%d files), done.    \n'
                 % (pct, _nonlocal['count']/1024, total/1024, fcount, ftotal))

    while len(stack) > 1: # _pop() all the parts above the root
        _pop()

    # Finish the root directory.
    # When there's a collision, use empty metadata for the root.
    tree = _pop(dir_metadata = metadata.Metadata() if root_collision else None)

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    if opt.tree:
        out.write(hexlify(tree))
        out.write(b'\n')
    if opt.commit or name:
        if compat.py_maj > 2:
            # Strip b prefix from python 3 bytes reprs to preserve previous format
            msgcmd = b'[%s]' % b', '.join([repr(argv_bytes(x))[1:].encode('ascii')
                                           for x in argv])
        else:
            msgcmd = repr(argv)
        msg = b'bup save\n\nGenerated by command:\n%s\n' % msgcmd
        userline = (b'%s <%s@%s>' % (userfullname(), username(), hostname()))
        commit = w.new_commit(tree, oldref, userline, date, None,
                              userline, date, None, msg)
        if opt.commit:
            out.write(hexlify(commit))
            out.write(b'\n')

    msr.close()
    w.close()  # must close before we can update the ref

    if opt.name:
        if cli:
            cli.update_ref(refname, commit, oldref)
        else:
            git.update_ref(refname, commit, oldref)

    if cli:
        cli.close()

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
        sys.exit(1)
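
For reference, the commit message assembled above renders like this under
Python 3 (command line hypothetical):

    bup save

    Generated by command:
    ['/usr/bin/bup', 'save', '-n', 'mybranch', '/home/someuser']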
Example #45
                utime(path, (self.atime, self.mtime))
            except OSError as e:
                if e.errno == errno.EACCES:
                    raise ApplyError('utime: %s' % e)
                else:
                    raise

        # Don't try to restore owner unless we're root, and even
        # if asked, don't try to restore the owner or group if
        # it doesn't exist in the system db.
        uid = self.uid
        gid = self.gid
        if not restore_numeric_ids:
            if not self.owner:
                uid = -1
                add_error('ignoring missing owner for "%s"\n' % path)
            else:
                if os.geteuid() != 0:
                    uid = -1  # Not root; assume we can't change owner.
                else:
                    try:
                        uid = pwd.getpwnam(self.owner)[2]
                    except KeyError:
                        uid = -1
                        fmt = 'ignoring unknown owner %s for "%s"\n'
                        add_error(fmt % (self.owner, path))
            if not self.group:
                gid = -1
                add_error('ignoring missing group for "%s"\n' % path)
            else:
                try:
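
The excerpt cuts off inside the group branch, which presumably mirrors the
owner lookup above. A hedged sketch of that symmetric half (the grp call is
our assumption, chosen to match the pwd call in the uid branch):

import grp

try:
    gid = grp.getgrnam(self.group).gr_gid  # group name -> numeric gid
except KeyError:
    gid = -1
    add_error('ignoring unknown group %s for "%s"\n' % (self.group, path))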
Example #46
    def refresh(self, skip_midx=False):
        """Refresh the index list.
        This method verifies if .midx files were superseded (e.g. all of its
        contents are in another, bigger .midx file) and removes the superseded
        files.

        If skip_midx is True, all work on .midx files will be skipped and .midx
        files will be removed from the list.

        The module-global variable 'ignore_midx' can force this function to
        always act as if skip_midx was True.
        """
        self.bloom = None  # Always reopen the bloom as it may have been replaced
        self.do_bloom = False
        skip_midx = skip_midx or ignore_midx
        d = dict((p.name, p) for p in self.packs
                 if not skip_midx or not isinstance(p, midx.PackMidx))
        if os.path.exists(self.dir):
            if not skip_midx:
                midxl = []
                for ix in self.packs:
                    if isinstance(ix, midx.PackMidx):
                        for name in ix.idxnames:
                            d[os.path.join(self.dir, name)] = ix
                for full in glob.glob(os.path.join(self.dir, '*.midx')):
                    if not d.get(full):
                        mx = midx.PackMidx(full)
                        (mxd, mxf) = os.path.split(mx.name)
                        broken = False
                        for n in mx.idxnames:
                            if not os.path.exists(os.path.join(mxd, n)):
                                log(('warning: index %s missing\n' +
                                     '  used by %s\n') % (n, mxf))
                                broken = True
                        if broken:
                            mx.close()
                            del mx
                            unlink(full)
                        else:
                            midxl.append(mx)
                midxl.sort(
                    key=lambda ix: (-len(ix), -xstat.stat(ix.name).st_mtime))
                for ix in midxl:
                    any_needed = False
                    for sub in ix.idxnames:
                        found = d.get(os.path.join(self.dir, sub))
                        if not found or isinstance(found, PackIdx):
                            # doesn't exist, or exists but not in a midx
                            any_needed = True
                            break
                    if any_needed:
                        d[ix.name] = ix
                        for name in ix.idxnames:
                            d[os.path.join(self.dir, name)] = ix
                    elif not ix.force_keep:
                        debug1('midx: removing redundant: %s\n' %
                               os.path.basename(ix.name))
                        ix.close()
                        unlink(ix.name)
            for full in glob.glob(os.path.join(self.dir, '*.idx')):
                if not d.get(full):
                    try:
                        ix = open_idx(full)
                    except GitError as e:
                        add_error(e)
                        continue
                    d[full] = ix
            bfull = os.path.join(self.dir, 'bup.bloom')
            if self.bloom is None and os.path.exists(bfull):
                self.bloom = bloom.ShaBloom(bfull)
            self.packs = list(set(d.values()))
            self.packs.sort(reverse=True, key=lambda x: len(x))
            if (self.bloom and self.bloom.valid()
                    and len(self.bloom) >= len(self)):
                self.do_bloom = True
            else:
                self.bloom = None
        debug1('PackIdxList: using %d index%s.\n' %
               (len(self.packs), len(self.packs) != 1 and 'es' or ''))
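
A hedged usage sketch for refresh() (the repository layout and call sites
are assumptions, not taken from this excerpt):

from bup import git

git.check_repo_or_die()
pl = git.PackIdxList(git.repo(b'objects/pack'))
pl.refresh()                    # loads *.idx/*.midx, drops superseded midxes
print(pl.exists(b'\x00' * 20))  # query by 20-byte binary object id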
Example #47
        s = opt.find + '0'
    else:
        s = opt.find
    try:
        bin = s.decode('hex')
    except TypeError:
        o.fatal('--find parameter is not a valid hex string')

find = opt.find.lower()

count = 0
for name in extra:
    try:
        ix = git.open_idx(name)
    except git.GitError as e:
        add_error('%s: %s' % (name, e))
        continue
    if len(opt.find) == 40:
        if ix.exists(bin):
            print name, find
    else:
        # slow, exhaustive search
        for _i in ix:
            i = str(_i).encode('hex')
            if i.startswith(find):
                print name, i
            qprogress('Searching: %d\r' % count)
            count += 1

if saved_errors:
    log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
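
This example is Python 2 code (s.decode('hex'), print statements). Under
Python 3 the hex decode would look like this sketch instead:

from binascii import unhexlify

try:
    bin = unhexlify(s)  # fails unless s holds an even number of hex digits
except ValueError:      # binascii.Error is a ValueError subclass
    o.fatal('--find parameter is not a valid hex string')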
Example #48
    o.fatal('must specify at least one filename to restore')

exclude_rxs = parse_rx_excludes(flags, o.fatal)

owner_map = {}
for map_type in ('user', 'group', 'uid', 'gid'):
    owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal)

if opt.outdir:
    mkdirp(opt.outdir)
    os.chdir(opt.outdir)

ret = 0
for d in extra:
    if not valid_restore_path(d):
        add_error("ERROR: path %r doesn't include a branch and revision" % d)
        continue
    path, name = os.path.split(d)
    try:
        n = top.lresolve(d)
    except vfs.NodeError as e:
        add_error(e)
        continue
    isdir = stat.S_ISDIR(n.mode)
    if not name or name == '.':
        # Source is /foo/what/ever/ or /foo/what/ever/. -- extract
        # what/ever/* to the current directory, and if name == '.'
        # (i.e. /foo/what/ever/.), then also restore what/ever's
        # metadata to the current directory.
        if not isdir:
            add_error('%r: not a directory' % d)