def _get_file(self, repo, path, resolved):
    """Process a request on a file.

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent.
    """
    try:
        file_item = resolved[-1][1]
        file_item = vfs.augment_item_meta(repo, file_item, include_size=True)
        # we defer the set_header() calls until after we start writing
        # so we can still generate a 500 failure if something fails ...
        if self.request.method != 'HEAD':
            set_header = False
            with vfs.fopen(self.repo, file_item) as f:
                # Fixed: removed dead "it = chunkyreader(f)" -- the
                # iterator was created but never used; the loop below
                # already builds its own chunkyreader.
                for blob in chunkyreader(f):
                    if not set_header:
                        # Send headers only once we know the first
                        # chunk read succeeded.
                        self._set_header(path, file_item)
                        set_header = True
                    self.write(blob)
        else:
            self._set_header(path, file_item)
    except Exception as e:
        self.set_status(500)
        self.write("<h1>Server Error</h1>\n")
        self.write("%s: %s\n" % (e.__class__.__name__, str(e)))
    raise gen.Return()
def getattr(self, path):
    """FUSE getattr: return a fuse.Stat for path, or -errno.ENOENT.

    Metadata comes from the VFS item; when self.fake_metadata is set,
    only size augmentation is performed instead of a full metadata
    fetch.
    """
    path = argv_bytes(path)
    if self.verbose > 0:
        log('--getattr(%r)\n' % path)
    res = vfs.resolve(self.repo, path, want_meta=(not self.fake_metadata),
                      follow=False)
    name, item = res[-1]
    if not item:
        return -errno.ENOENT
    if self.fake_metadata:
        item = vfs.augment_item_meta(self.repo, item, include_size=True)
    else:
        item = vfs.ensure_item_has_metadata(self.repo, item,
                                            include_size=True)
    meta = item.meta
    # FIXME: do we want/need to do anything more with nlink?
    # Fixed: removed redundant "st.st_mode = meta.mode"; st_mode is
    # already set via the constructor argument below.
    st = fuse.Stat(st_mode=meta.mode, st_nlink=1, st_size=meta.size)
    st.st_uid = meta.uid or 0
    st.st_gid = meta.gid or 0
    # Clamp pre-epoch timestamps to 0; FUSE expects non-negative times.
    st.st_atime = max(0, xstat.fstime_floor_secs(meta.atime))
    st.st_mtime = max(0, xstat.fstime_floor_secs(meta.mtime))
    st.st_ctime = max(0, xstat.fstime_floor_secs(meta.ctime))
    return st
def run_augment_item_meta_tests(repo, file_path, file_size, link_path,
                                link_target):
    """Exercise vfs.augment_item_meta() on a plain file and a symlink.

    Checks that fully-populated items are returned unchanged, that a
    missing size is filled in only when include_size=True, and that
    mode-only (non-Metadata) meta values are expanded into full
    Metadata objects with zeroed ownership/times.
    """
    _, file_item = vfs.resolve(repo, file_path)[-1]
    _, link_item = vfs.lresolve(repo, link_path)[-1]
    wvpass(isinstance(file_item.meta, Metadata))
    wvpass(isinstance(link_item.meta, Metadata))
    # Note: normally, modifying item.meta values is forbidden
    file_item.meta.size = file_item.meta.size or vfs.item_size(repo, file_item)
    link_item.meta.size = link_item.meta.size or vfs.item_size(repo, link_item)

    ## Ensure a fully populated item is left alone
    augmented = vfs.augment_item_meta(repo, file_item)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)
    augmented = vfs.augment_item_meta(repo, file_item, include_size=True)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)

    ## Ensure a missing size is handled properly
    file_item.meta.size = None
    augmented = vfs.augment_item_meta(repo, file_item)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)
    augmented = vfs.augment_item_meta(repo, file_item, include_size=True)
    wvpass(augmented is not file_item)
    wvpasseq(file_size, augmented.meta.size)

    ## Ensure a meta mode is handled properly
    mode_item = file_item._replace(meta=vfs.default_file_mode)
    augmented = vfs.augment_item_meta(repo, mode_item)
    augmented_w_size = vfs.augment_item_meta(repo, mode_item,
                                             include_size=True)
    for item in (augmented, augmented_w_size):
        meta = item.meta
        wvpass(item is not file_item)
        wvpass(isinstance(meta, Metadata))
        wvpasseq(vfs.default_file_mode, meta.mode)
        # Synthesized metadata carries zeroed ownership and timestamps.
        wvpasseq((0, 0, 0, 0, 0),
                 (meta.uid, meta.gid, meta.atime, meta.mtime, meta.ctime))
    wvpass(augmented.meta.size is None)
    wvpasseq(file_size, augmented_w_size.meta.size)

    ## Ensure symlinks are handled properly
    mode_item = link_item._replace(meta=vfs.default_symlink_mode)
    augmented = vfs.augment_item_meta(repo, mode_item)
    wvpass(augmented is not mode_item)
    wvpass(isinstance(augmented.meta, Metadata))
    wvpasseq(link_target, augmented.meta.symlink_target)
    wvpasseq(len(link_target), augmented.meta.size)
    augmented = vfs.augment_item_meta(repo, mode_item, include_size=True)
    wvpass(augmented is not mode_item)
    wvpass(isinstance(augmented.meta, Metadata))
    wvpasseq(link_target, augmented.meta.symlink_target)
    wvpasseq(len(link_target), augmented.meta.size)
def run_augment_item_meta_tests(repo, file_path, file_size, link_path,
                                link_target):
    """Exercise vfs.augment_item_meta() on a plain file and a symlink.

    Same coverage as the lresolve-based variant, but the symlink is
    looked up with resolve(..., follow=False): unchanged fully-populated
    items, size fill-in under include_size=True, and expansion of
    mode-only meta values into full Metadata.
    """
    _, file_item = vfs.resolve(repo, file_path)[-1]
    _, link_item = vfs.resolve(repo, link_path, follow=False)[-1]
    wvpass(isinstance(file_item.meta, Metadata))
    wvpass(isinstance(link_item.meta, Metadata))
    # Note: normally, modifying item.meta values is forbidden
    file_item.meta.size = file_item.meta.size or vfs.item_size(repo, file_item)
    link_item.meta.size = link_item.meta.size or vfs.item_size(repo, link_item)

    ## Ensure a fully populated item is left alone
    augmented = vfs.augment_item_meta(repo, file_item)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)
    augmented = vfs.augment_item_meta(repo, file_item, include_size=True)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)

    ## Ensure a missing size is handled properly
    file_item.meta.size = None
    augmented = vfs.augment_item_meta(repo, file_item)
    wvpass(augmented is file_item)
    wvpass(augmented.meta is file_item.meta)
    augmented = vfs.augment_item_meta(repo, file_item, include_size=True)
    wvpass(augmented is not file_item)
    wvpasseq(file_size, augmented.meta.size)

    ## Ensure a meta mode is handled properly
    mode_item = file_item._replace(meta=vfs.default_file_mode)
    augmented = vfs.augment_item_meta(repo, mode_item)
    augmented_w_size = vfs.augment_item_meta(repo, mode_item,
                                             include_size=True)
    for item in (augmented, augmented_w_size):
        meta = item.meta
        wvpass(item is not file_item)
        wvpass(isinstance(meta, Metadata))
        wvpasseq(vfs.default_file_mode, meta.mode)
        # Synthesized metadata carries zeroed ownership and timestamps.
        wvpasseq((0, 0, 0, 0, 0),
                 (meta.uid, meta.gid, meta.atime, meta.mtime, meta.ctime))
    wvpass(augmented.meta.size is None)
    wvpasseq(file_size, augmented_w_size.meta.size)

    ## Ensure symlinks are handled properly
    mode_item = link_item._replace(meta=vfs.default_symlink_mode)
    augmented = vfs.augment_item_meta(repo, mode_item)
    wvpass(augmented is not mode_item)
    wvpass(isinstance(augmented.meta, Metadata))
    wvpasseq(link_target, augmented.meta.symlink_target)
    wvpasseq(len(link_target), augmented.meta.size)
    augmented = vfs.augment_item_meta(repo, mode_item, include_size=True)
    wvpass(augmented is not mode_item)
    wvpass(isinstance(augmented.meta, Metadata))
    wvpasseq(link_target, augmented.meta.symlink_target)
    wvpasseq(len(link_target), augmented.meta.size)
def main(argv):
    """Entry point for cat-file: write a saved item's data or metadata.

    Exactly one repository path (branch/revision/...) must be given.
    With --bupm, dump the directory's .bupm metadata stream; with
    --meta, dump the item's encoded metadata; otherwise dump a plain
    file's content.  Exits nonzero on access failures or saved errors.
    """
    parser = options.Options(optspec)
    opt, flags, extra = parser.parse_bytes(argv[1:])
    git.check_repo_or_die()

    # Command-line validation: one target, and --meta/--bupm are
    # mutually exclusive.
    if not extra:
        parser.fatal('must specify a target')
    if len(extra) > 1:
        parser.fatal('only one target file allowed')
    if opt.bupm and opt.meta:
        parser.fatal('--meta and --bupm are incompatible')

    target = argv_bytes(extra[0])
    if not re.match(br'/*[^/]+/[^/]+', target):
        parser.fatal("path %r doesn't include a branch and revision" % target)

    with LocalRepo() as repo:
        resolved = vfs.resolve(repo, target, follow=False)
        _, leaf_item = resolved[-1]
        if not leaf_item:
            log('error: cannot access %r in %r\n'
                % (b'/'.join(name for name, item in resolved), target))
            sys.exit(1)
        mode = vfs.item_mode(leaf_item)
        # Flush the text layer before switching to raw byte output.
        sys.stdout.flush()
        out = byte_stream(sys.stdout)
        if opt.bupm:
            if not stat.S_ISDIR(mode):
                parser.fatal('%r is not a directory' % target)
            _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
            if bupm_oid:
                with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
                    out.write(meta_stream.read())
        elif opt.meta:
            augmented = vfs.augment_item_meta(repo, leaf_item,
                                              include_size=True)
            out.write(augmented.meta.encode())
        else:
            if stat.S_ISREG(mode):
                with vfs.fopen(repo, leaf_item) as f:
                    for chunk in chunkyreader(f):
                        out.write(chunk)
            else:
                parser.fatal('%r is not a plain file' % target)
    if saved_errors:
        log('warning: %d errors encountered\n' % len(saved_errors))
        sys.exit(1)
def _get_file(self, repo, path, resolved):
    """Process a request on a file.

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent.
    """
    file_item = resolved[-1][1]
    file_item = vfs.augment_item_meta(repo, file_item, include_size=True)
    meta = file_item.meta
    ctype = self._guess_type(path)
    self.set_header("Last-Modified", http_date_from_utc_ns(meta.mtime))
    self.set_header("Content-Type", ctype)
    self.set_header("Content-Length", str(meta.size))
    # Git object ids are 20-byte SHA-1s; use the hex form as a strong ETag.
    assert len(file_item.oid) == 20
    self.set_header("Etag", hexlify(file_item.oid))
    if self.request.method != 'HEAD':
        with vfs.fopen(self.repo, file_item) as f:
            # Fixed: removed dead "it = chunkyreader(f)" -- the iterator
            # was never used; the loop builds its own chunkyreader.
            for blob in chunkyreader(f):
                self.write(blob)
    raise gen.Return()
def _get_file(self, repo, path, resolved):
    """Process a request on a file.

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent.
    """
    file_item = resolved[-1][1]
    file_item = vfs.augment_item_meta(repo, file_item, include_size=True)
    meta = file_item.meta
    ctype = self._guess_type(path)
    self.set_header("Last-Modified", http_date_from_utc_ns(meta.mtime))
    self.set_header("Content-Type", ctype)
    self.set_header("Content-Length", str(meta.size))
    # Git object ids are 20-byte SHA-1s; the hex form serves as a
    # strong ETag.  (str.encode('hex') is Python 2 only -- this block
    # predates the py3 hexlify() version.)
    assert len(file_item.oid) == 20
    self.set_header("Etag", file_item.oid.encode('hex'))
    if self.request.method != 'HEAD':
        with vfs.fopen(self.repo, file_item) as f:
            # Fixed: removed dead "it = chunkyreader(f)" -- the iterator
            # was never used; the loop builds its own chunkyreader.
            for blob in chunkyreader(f):
                self.write(blob)
    raise gen.Return()
def getattr(self, path):
    """FUSE getattr: return a fuse.Stat for path, or -errno.ENOENT.

    Metadata comes from the VFS item; when self.fake_metadata is set,
    only size augmentation is performed instead of a full metadata
    fetch.
    """
    # Fixed: removed dead "global opt" -- opt is never referenced in
    # this method.
    if self.verbose > 0:
        log('--getattr(%r)\n' % path)
    res = vfs.resolve(self.repo, path, want_meta=(not self.fake_metadata),
                      follow=False)
    name, item = res[-1]
    if not item:
        return -errno.ENOENT
    if self.fake_metadata:
        item = vfs.augment_item_meta(self.repo, item, include_size=True)
    else:
        item = vfs.ensure_item_has_metadata(self.repo, item,
                                            include_size=True)
    meta = item.meta
    # FIXME: do we want/need to do anything more with nlink?
    st = fuse.Stat(st_mode=meta.mode, st_nlink=1, st_size=meta.size)
    st.st_mode = meta.mode
    # NOTE(review): meta.uid/meta.gid may be None here (the newer
    # version of this method defaults them with "or 0") -- confirm
    # whether fuse.Stat tolerates None.
    st.st_uid = meta.uid
    st.st_gid = meta.gid
    # Clamp pre-epoch timestamps to 0; FUSE expects non-negative times.
    st.st_atime = max(0, xstat.fstime_floor_secs(meta.atime))
    st.st_mtime = max(0, xstat.fstime_floor_secs(meta.mtime))
    st.st_ctime = max(0, xstat.fstime_floor_secs(meta.ctime))
    return st
def restore(repo, parent_path, name, item, top, sparse, numeric_ids,
            owner_map, exclude_rxs, verbosity, hardlinks):
    """Restore item (and, for directories, its contents recursively)
    into the current working directory as name.

    Honors --exclude-rx style patterns via exclude_rxs, creates
    hardlinks when possible using the shared hardlinks map, and applies
    saved metadata after the content is written.  Directories are
    restored by chdir-ing into the created directory and recursing;
    the original cwd is always restored on exit.  Updates the global
    total_restored counter for progress reporting.
    """
    global total_restored
    mode = vfs.item_mode(item)
    treeish = S_ISDIR(mode)
    fullname = parent_path + b'/' + name
    # Match behavior of index --exclude-rx with respect to paths.
    if should_rx_exclude_path(fullname + (b'/' if treeish else b''),
                              exclude_rxs):
        return
    if not treeish:
        # Do this now so we'll have meta.symlink_target for verbose output
        item = vfs.augment_item_meta(repo, item, include_size=True)
        meta = item.meta
        assert (meta.mode == mode)
    if stat.S_ISDIR(mode):
        if verbosity >= 1:
            out.write(b'%s/\n' % fullname)
    elif stat.S_ISLNK(mode):
        assert (meta.symlink_target)
        if verbosity >= 2:
            out.write(b'%s@ -> %s\n' % (fullname, meta.symlink_target))
    else:
        if verbosity >= 2:
            out.write(fullname + b'\n')
    orig_cwd = os.getcwd()
    try:
        if treeish:
            # Assumes contents() returns '.' with the full metadata first
            sub_items = vfs.contents(repo, item, want_meta=True)
            dot, item = next(sub_items, None)
            assert (dot == b'.')
            item = vfs.augment_item_meta(repo, item, include_size=True)
            meta = item.meta
            meta.create_path(name)
            os.chdir(name)
            total_restored += 1
            if verbosity >= 0:
                qprogress('Restoring: %d\r' % total_restored)
            for sub_name, sub_item in sub_items:
                restore(repo, fullname, sub_name, sub_item, top, sparse,
                        numeric_ids, owner_map, exclude_rxs, verbosity,
                        hardlinks)
            os.chdir(b'..')
            # Apply the directory's own metadata only after its contents
            # exist (e.g. so a read-only mode doesn't block the writes).
            apply_metadata(meta, name, numeric_ids, owner_map)
        else:
            created_hardlink = False
            if meta.hardlink_target:
                created_hardlink = hardlink_if_possible(
                    fullname, item, top, hardlinks)
            if not created_hardlink:
                meta.create_path(name)
                if stat.S_ISREG(meta.mode):
                    if sparse:
                        write_file_content_sparsely(repo, name, item)
                    else:
                        write_file_content(repo, name, item)
            total_restored += 1
            if verbosity >= 0:
                qprogress('Restoring: %d\r' % total_restored)
            # A hardlink already shares the target's metadata.
            if not created_hardlink:
                apply_metadata(meta, name, numeric_ids, owner_map)
    finally:
        os.chdir(orig_cwd)
def main(argv):
    """Entry point for restore: extract the requested save paths into
    the output directory (or cwd), applying metadata and ownership
    mappings, then report errors via die_if_errors().
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    verbosity = (opt.verbose or 0) if not opt.quiet else -1
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    if opt.outdir:
        opt.outdir = argv_bytes(opt.outdir)
    git.check_repo_or_die()
    if not extra:
        o.fatal('must specify at least one filename to restore')
    exclude_rxs = parse_rx_excludes(flags, o.fatal)
    owner_map = {}
    for map_type in ('user', 'group', 'uid', 'gid'):
        owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal)
    if opt.outdir:
        mkdirp(opt.outdir)
        os.chdir(opt.outdir)
    repo = RemoteRepo(opt.remote) if opt.remote else LocalRepo()
    # Remember the restore root for hardlink bookkeeping.
    top = fsencode(os.getcwd())
    hardlinks = {}
    for path in [argv_bytes(x) for x in extra]:
        if not valid_restore_path(path):
            add_error("path %r doesn't include a branch and revision" % path)
            continue
        try:
            resolved = vfs.resolve(repo, path, want_meta=True, follow=False)
        except vfs.IOError as e:
            add_error(e)
            continue
        if len(resolved) == 3 and resolved[2][0] == b'latest':
            # Follow latest symlink to the actual save
            try:
                resolved = vfs.resolve(repo, b'latest', parent=resolved[:-1],
                                       want_meta=True)
            except vfs.IOError as e:
                add_error(e)
                continue
            # Rename it back to 'latest'
            resolved = tuple(elt if i != 2 else (b'latest', ) + elt[1:]
                             for i, elt in enumerate(resolved))
        path_parent, path_name = os.path.split(path)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            add_error('error: cannot access %r in %r'
                      % (b'/'.join(name for name, item in resolved), path))
            continue
        if not path_name or path_name == b'.':
            # Source is /foo/what/ever/ or /foo/what/ever/. -- extract
            # what/ever/* to the current directory, and if name == '.'
            # (i.e. /foo/what/ever/.), then also restore what/ever's
            # metadata to the current directory.
            # NOTE(review): treeish holds the raw mode (always truthy
            # for a resolved item), not an S_ISDIR() test -- confirm
            # this guard ever fires as intended.
            treeish = vfs.item_mode(leaf_item)
            if not treeish:
                add_error('%r cannot be restored as a directory' % path)
            else:
                items = vfs.contents(repo, leaf_item, want_meta=True)
                dot, leaf_item = next(items, None)
                assert dot == b'.'
                for sub_name, sub_item in items:
                    restore(repo, b'', sub_name, sub_item, top, opt.sparse,
                            opt.numeric_ids, owner_map, exclude_rxs,
                            verbosity, hardlinks)
                if path_name == b'.':
                    leaf_item = vfs.augment_item_meta(repo, leaf_item,
                                                      include_size=True)
                    apply_metadata(leaf_item.meta, b'.', opt.numeric_ids,
                                   owner_map)
        else:
            restore(repo, b'', leaf_name, leaf_item, top, opt.sparse,
                    opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                    hardlinks)
    if verbosity >= 0:
        progress('Restoring: %d, done.\n' % total_restored)
    die_if_errors()
# Resolve the requested target and emit its data/metadata to stdout.
resolved = vfs.resolve(repo, target, follow=False)
leaf_name, leaf_item = resolved[-1]
if not leaf_item:
    # Fixed: the message referenced "path", which is not defined in
    # this fragment; the variable in scope is "target" (the later
    # version of this code uses target here).
    log('error: cannot access %r in %r\n'
        % ('/'.join(name for name, item in resolved), target))
    sys.exit(1)
mode = vfs.item_mode(leaf_item)
if opt.bupm:
    if not stat.S_ISDIR(mode):
        o.fatal('%r is not a directory' % target)
    _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
    if bupm_oid:
        with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
            sys.stdout.write(meta_stream.read())
elif opt.meta:
    augmented = vfs.augment_item_meta(repo, leaf_item, include_size=True)
    sys.stdout.write(augmented.meta.encode())
else:
    if stat.S_ISREG(mode):
        with vfs.fopen(repo, leaf_item) as f:
            for b in chunkyreader(f):
                sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)
if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
def main():
    """Entry point for restore (Python 2 era, str paths): extract the
    requested save paths into the output directory (or cwd), applying
    metadata and ownership mappings, then report errors.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse(sys.argv[1:])
    verbosity = opt.verbose if not opt.quiet else -1
    git.check_repo_or_die()
    if not extra:
        o.fatal('must specify at least one filename to restore')
    exclude_rxs = parse_rx_excludes(flags, o.fatal)
    owner_map = {}
    for map_type in ('user', 'group', 'uid', 'gid'):
        owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal)
    if opt.outdir:
        mkdirp(opt.outdir)
        os.chdir(opt.outdir)
    repo = RemoteRepo(opt.remote) if opt.remote else LocalRepo()
    # Remember the restore root for hardlink bookkeeping.
    top = os.getcwd()
    hardlinks = {}
    for path in extra:
        if not valid_restore_path(path):
            add_error("path %r doesn't include a branch and revision" % path)
            continue
        try:
            resolved = vfs.resolve(repo, path, want_meta=True, follow=False)
        except vfs.IOError as e:
            add_error(e)
            continue
        if len(resolved) == 3 and resolved[2][0] == 'latest':
            # Follow latest symlink to the actual save
            try:
                resolved = vfs.resolve(repo, 'latest', parent=resolved[:-1],
                                       want_meta=True)
            except vfs.IOError as e:
                add_error(e)
                continue
            # Rename it back to 'latest'
            resolved = tuple(elt if i != 2 else ('latest',) + elt[1:]
                             for i, elt in enumerate(resolved))
        path_parent, path_name = os.path.split(path)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            add_error('error: cannot access %r in %r'
                      % ('/'.join(name for name, item in resolved), path))
            continue
        if not path_name or path_name == '.':
            # Source is /foo/what/ever/ or /foo/what/ever/. -- extract
            # what/ever/* to the current directory, and if name == '.'
            # (i.e. /foo/what/ever/.), then also restore what/ever's
            # metadata to the current directory.
            # NOTE(review): treeish holds the raw mode (always truthy
            # for a resolved item), not an S_ISDIR() test -- confirm
            # this guard ever fires as intended.
            treeish = vfs.item_mode(leaf_item)
            if not treeish:
                add_error('%r cannot be restored as a directory' % path)
            else:
                items = vfs.contents(repo, leaf_item, want_meta=True)
                dot, leaf_item = next(items, None)
                assert(dot == '.')
                for sub_name, sub_item in items:
                    restore(repo, '', sub_name, sub_item, top, opt.sparse,
                            opt.numeric_ids, owner_map, exclude_rxs,
                            verbosity, hardlinks)
                if path_name == '.':
                    leaf_item = vfs.augment_item_meta(repo, leaf_item,
                                                      include_size=True)
                    apply_metadata(leaf_item.meta, '.', opt.numeric_ids,
                                   owner_map)
        else:
            restore(repo, '', leaf_name, leaf_item, top, opt.sparse,
                    opt.numeric_ids, owner_map, exclude_rxs, verbosity,
                    hardlinks)
    if verbosity >= 0:
        progress('Restoring: %d, done.\n' % total_restored)
    die_if_errors()
def within_repo(repo, opt):
    """List the requested repository paths, ls(1)-style.

    Directories are listed by contents unless opt.directory is set;
    hidden-entry handling follows opt.show_hidden ('almost'/'all').
    Output lines are printed immediately, or queued and columnated at
    the end when writing to a tty without long listing.  Returns 0 on
    success, 1 if any path could not be accessed.
    """
    if opt.commit_hash:
        opt.hash = True

    def item_line(item, name):
        # One formatted output line for the given item.
        return item_info(item, name,
                         show_hash=opt.hash,
                         commit_hash=opt.commit_hash,
                         long_fmt=opt.long_listing,
                         classification=opt.classification,
                         numeric_ids=opt.numeric_ids,
                         human_readable=opt.human_readable)

    ret = 0
    pending = []
    for path in opt.paths:
        try:
            if opt.directory:
                resolved = vfs.resolve(repo, path, follow=False)
            else:
                resolved = vfs.try_resolve(repo, path)
            leaf_name, leaf_item = resolved[-1]
            if not leaf_item:
                log('error: cannot access %r in %r\n'
                    % ('/'.join(name for name, item in resolved), path))
                ret = 1
                continue
            if not opt.directory and S_ISDIR(vfs.item_mode(leaf_item)):
                items = vfs.contents(repo, leaf_item)
                if opt.show_hidden == 'all':
                    # Match non-bup "ls -a ... /".
                    parent = resolved[-2] if len(resolved) > 1 else resolved[0]
                    items = chain(items, (('..', parent[1]), ))
                for sub_name, sub_item in sorted(items, key=lambda x: x[0]):
                    if opt.show_hidden != 'all' and sub_name == '.':
                        continue
                    if sub_name.startswith('.') and \
                       opt.show_hidden not in ('almost', 'all'):
                        continue
                    # -l needs full metadata; otherwise size alone suffices.
                    if opt.l:
                        sub_item = vfs.ensure_item_has_metadata(
                            repo, sub_item, include_size=True)
                    else:
                        sub_item = vfs.augment_item_meta(repo, sub_item,
                                                         include_size=True)
                    line = item_line(sub_item, sub_name)
                    if not opt.long_listing and istty1:
                        pending.append(line)
                    else:
                        print(line)
            else:
                leaf_item = vfs.augment_item_meta(repo, leaf_item,
                                                  include_size=True)
                line = item_line(leaf_item, os.path.normpath(path))
                if not opt.long_listing and istty1:
                    pending.append(line)
                else:
                    print(line)
        except vfs.IOError as ex:
            log('bup: %s\n' % ex)
            ret = 1
    if pending:
        sys.stdout.write(columnate(pending, ''))
    return ret
def show_paths(repo, opt, paths, out, pwd, should_columnate, prefix=b''):
    """Write ls-style listings for paths (relative to pwd) to out.

    prefix is prepended to printed names during recursion (-R).  When
    should_columnate is true, each path's lines are buffered and
    columnated; otherwise lines are written as produced.  Returns 0 on
    success, 1 if any path could not be accessed.
    """
    def item_line(item, name):
        # One formatted output line for the given item.
        return item_info(item, prefix + name,
                         show_hash=opt.hash,
                         commit_hash=opt.commit_hash,
                         long_fmt=opt.long_listing,
                         classification=opt.classification,
                         numeric_ids=opt.numeric_ids,
                         human_readable=opt.human_readable)

    ret = 0
    want_meta = bool(opt.long_listing or opt.classification)
    pending = []
    last_n = len(paths) - 1
    for n, printpath in enumerate(paths):
        path = posixpath.join(pwd, printpath)
        try:
            if last_n > 0:
                # Multiple paths: label each section like ls does.
                out.write(b'%s:\n' % printpath)
            if opt.directory:
                resolved = vfs.resolve(repo, path, follow=False)
            else:
                resolved = vfs.try_resolve(repo, path, want_meta=want_meta)
            leaf_name, leaf_item = resolved[-1]
            if not leaf_item:
                log('error: cannot access %r in %r\n'
                    % ('/'.join(path_msg(name) for name, item in resolved),
                       path_msg(path)))
                ret = 1
                continue
            if not opt.directory and S_ISDIR(vfs.item_mode(leaf_item)):
                items = vfs.contents(repo, leaf_item, want_meta=want_meta)
                if opt.show_hidden == 'all':
                    # Match non-bup "ls -a ... /".
                    parent = resolved[-2] if len(resolved) > 1 else resolved[0]
                    items = chain(items, ((b'..', parent[1]), ))
                for sub_name, sub_item in sorted(items, key=lambda x: x[0]):
                    if opt.show_hidden != 'all' and sub_name == b'.':
                        continue
                    if sub_name.startswith(b'.') and \
                       opt.show_hidden not in ('almost', 'all'):
                        continue
                    # always skip . and .. in the subfolders - already
                    # printed it anyway
                    if prefix and sub_name in (b'.', b'..'):
                        continue
                    if opt.l:
                        sub_item = vfs.ensure_item_has_metadata(
                            repo, sub_item, include_size=True)
                    elif want_meta:
                        sub_item = vfs.augment_item_meta(repo, sub_item,
                                                         include_size=True)
                    line = item_line(sub_item, sub_name)
                    if should_columnate:
                        pending.append(line)
                    else:
                        out.write(line)
                        out.write(b'\n')
                    # recurse into subdirectories (apart from . and ..,
                    # of course)
                    if opt.recursive and S_ISDIR(
                            vfs.item_mode(sub_item)) and sub_name not in (
                                b'.', b'..'):
                        show_paths(repo, opt, [path + b'/' + sub_name], out,
                                   pwd, should_columnate,
                                   prefix=prefix + sub_name + b'/')
            else:
                if opt.long_listing:
                    leaf_item = vfs.augment_item_meta(repo, leaf_item,
                                                      include_size=True)
                line = item_line(leaf_item, os.path.normpath(path))
                if should_columnate:
                    pending.append(line)
                else:
                    out.write(line)
                    out.write(b'\n')
        except vfs.IOError as ex:
            log('bup: %s\n' % ex)
            ret = 1
        # Flush this path's buffered lines as one columnated group.
        if pending:
            out.write(columnate(pending, b''))
            pending = []
        if n < last_n:
            out.write(b'\n')
    return ret
# Resolve the requested target (without following a final symlink) and
# emit its data/metadata to stdout.
resolved = vfs.lresolve(repo, target)
leaf_name, leaf_item = resolved[-1]
if not leaf_item:
    # Fixed: the message referenced "path", which is not defined in
    # this fragment; the variable in scope is "target" (the later
    # version of this code uses target here).
    log('error: cannot access %r in %r\n'
        % ('/'.join(name for name, item in resolved), target))
    sys.exit(1)
mode = vfs.item_mode(leaf_item)
if opt.bupm:
    if not stat.S_ISDIR(mode):
        o.fatal('%r is not a directory' % target)
    _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
    if bupm_oid:
        with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
            sys.stdout.write(meta_stream.read())
elif opt.meta:
    augmented = vfs.augment_item_meta(repo, leaf_item, include_size=True)
    sys.stdout.write(augmented.meta.encode())
else:
    if stat.S_ISREG(mode):
        with vfs.fopen(repo, leaf_item) as f:
            for b in chunkyreader(f):
                sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)
if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
def within_repo(repo, opt):
    """List the requested repository paths, ls(1)-style.

    Directory contents are listed unless opt.directory is set; hidden
    entries follow opt.show_hidden ('almost'/'all').  Lines go straight
    to stdout, except that short listings on a tty are buffered and
    columnated at the end.  Returns 0 on success, 1 if any path was
    inaccessible.
    """
    if opt.commit_hash:
        opt.hash = True

    def render(item, name):
        # Build one formatted output line for the given item.
        return item_info(item, name,
                         show_hash=opt.hash,
                         commit_hash=opt.commit_hash,
                         long_fmt=opt.long_listing,
                         classification=opt.classification,
                         numeric_ids=opt.numeric_ids,
                         human_readable=opt.human_readable)

    def emit(line):
        # Queue for columnation on a tty short listing, else print now.
        if not opt.long_listing and istty1:
            queued.append(line)
        else:
            print(line)

    exit_status = 0
    queued = []
    for path in opt.paths:
        try:
            resolved = vfs.resolve(repo, path, follow=False) \
                if opt.directory else vfs.try_resolve(repo, path)
            _, item = resolved[-1]
            if not item:
                log('error: cannot access %r in %r\n'
                    % ('/'.join(name for name, item in resolved), path))
                exit_status = 1
                continue
            if not opt.directory and S_ISDIR(vfs.item_mode(item)):
                entries = vfs.contents(repo, item)
                if opt.show_hidden == 'all':
                    # Match non-bup "ls -a ... /".
                    parent = resolved[-2] if len(resolved) > 1 else resolved[0]
                    entries = chain(entries, (('..', parent[1]),))
                for child_name, child in sorted(entries, key=lambda x: x[0]):
                    if opt.show_hidden != 'all' and child_name == '.':
                        continue
                    if child_name.startswith('.') \
                       and opt.show_hidden not in ('almost', 'all'):
                        continue
                    # -l needs full metadata; otherwise size suffices.
                    if opt.l:
                        child = vfs.ensure_item_has_metadata(
                            repo, child, include_size=True)
                    else:
                        child = vfs.augment_item_meta(repo, child,
                                                      include_size=True)
                    emit(render(child, child_name))
            else:
                item = vfs.augment_item_meta(repo, item, include_size=True)
                emit(render(item, os.path.normpath(path)))
        except vfs.IOError as ex:
            log('bup: %s\n' % ex)
            exit_status = 1
    if queued:
        sys.stdout.write(columnate(queued, ''))
    return exit_status
def restore(repo, parent_path, name, item, top, sparse, numeric_ids,
            owner_map, exclude_rxs, verbosity, hardlinks):
    """Restore item (and, for directories, its contents recursively)
    into the current working directory as name (str-path variant).

    Honors --exclude-rx style patterns via exclude_rxs, creates
    hardlinks when possible using the shared hardlinks map, and applies
    saved metadata after the content is written.  Directories are
    restored by chdir-ing into the created directory and recursing;
    the original cwd is always restored on exit.  Updates the global
    total_restored counter for progress reporting.
    """
    global total_restored
    mode = vfs.item_mode(item)
    treeish = S_ISDIR(mode)
    fullname = parent_path + '/' + name
    # Match behavior of index --exclude-rx with respect to paths.
    if should_rx_exclude_path(fullname + ('/' if treeish else ''),
                              exclude_rxs):
        return
    if not treeish:
        # Do this now so we'll have meta.symlink_target for verbose output
        item = vfs.augment_item_meta(repo, item, include_size=True)
        meta = item.meta
        assert(meta.mode == mode)
    if stat.S_ISDIR(mode):
        if verbosity >= 1:
            print('%s/' % fullname)
    elif stat.S_ISLNK(mode):
        assert(meta.symlink_target)
        if verbosity >= 2:
            print('%s@ -> %s' % (fullname, meta.symlink_target))
    else:
        if verbosity >= 2:
            print(fullname)
    orig_cwd = os.getcwd()
    try:
        if treeish:
            # Assumes contents() returns '.' with the full metadata first
            sub_items = vfs.contents(repo, item, want_meta=True)
            dot, item = next(sub_items, None)
            assert(dot == '.')
            item = vfs.augment_item_meta(repo, item, include_size=True)
            meta = item.meta
            meta.create_path(name)
            os.chdir(name)
            total_restored += 1
            if verbosity >= 0:
                qprogress('Restoring: %d\r' % total_restored)
            for sub_name, sub_item in sub_items:
                restore(repo, fullname, sub_name, sub_item, top, sparse,
                        numeric_ids, owner_map, exclude_rxs, verbosity,
                        hardlinks)
            os.chdir('..')
            # Apply the directory's own metadata only after its contents
            # exist (e.g. so a read-only mode doesn't block the writes).
            apply_metadata(meta, name, numeric_ids, owner_map)
        else:
            created_hardlink = False
            if meta.hardlink_target:
                created_hardlink = hardlink_if_possible(fullname, item, top,
                                                        hardlinks)
            if not created_hardlink:
                meta.create_path(name)
                if stat.S_ISREG(meta.mode):
                    if sparse:
                        write_file_content_sparsely(repo, name, item)
                    else:
                        write_file_content(repo, name, item)
            total_restored += 1
            if verbosity >= 0:
                qprogress('Restoring: %d\r' % total_restored)
            # A hardlink already shares the target's metadata.
            if not created_hardlink:
                apply_metadata(meta, name, numeric_ids, owner_map)
    finally:
        os.chdir(orig_cwd)