def _recursive_dirlist(prepend, xdev, bup_dir=None, excluded_paths=None,
                       exclude_rxs=None, xdev_exceptions=frozenset()):
    """Yield (path, stat) for every entry under the current directory.

    Recurses into subdirectories by fchdir-ing into them and back out
    via os.chdir(b'..'), so the process working directory is only
    restored if iteration runs to completion.  Skips excluded paths,
    the bup repository directory itself, and (when xdev is set)
    entries on other filesystems unless listed in xdev_exceptions.
    """
    for (name, pst) in _dirlist():
        path = prepend + name
        if excluded_paths:
            if os.path.normpath(path) in excluded_paths:
                debug1('Skipping %r: excluded.\n' % path_msg(path))
                continue
        if exclude_rxs and should_rx_exclude_path(path, exclude_rxs):
            continue
        if name.endswith(b'/'):  # directory entry (trailing slash)
            # Prefer "is not None" over "!= None" for None checks.
            if bup_dir is not None:
                if os.path.normpath(path) == bup_dir:
                    debug1('Skipping BUP_DIR.\n')
                    continue
            # Don't cross filesystem boundaries unless explicitly allowed.
            if xdev is not None and pst.st_dev != xdev \
               and path not in xdev_exceptions:
                debug1('Skipping contents of %r: different filesystem.\n'
                       % path_msg(path))
            else:
                try:
                    OsFile(name).fchdir()
                except OSError as e:
                    add_error('%s: %s' % (prepend, e))
                else:
                    for i in _recursive_dirlist(prepend=prepend + name,
                                                xdev=xdev,
                                                bup_dir=bup_dir,
                                                excluded_paths=excluded_paths,
                                                exclude_rxs=exclude_rxs,
                                                xdev_exceptions=xdev_exceptions):
                        yield i
                    os.chdir(b'..')
        yield (path, pst)
def main(argv):
    # Entry point for "bup tag": list existing tags, delete a tag, or
    # create a new tag pointing at a commit.
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    git.check_repo_or_die()
    # Flatten {refname: [tag, ...]} into a simple list of tag names.
    tags = [t for sublist in git.tags().values() for t in sublist]
    if opt.delete:
        # git.delete_ref() doesn't complain if a ref doesn't exist.  We
        # could implement this verification but we'd need to read in the
        # contents of the tag file and pass the hash, and we already know
        # about the tag's existence via "tags".
        tag_name = argv_bytes(opt.delete)
        if not opt.force and tag_name not in tags:
            log("error: tag '%s' doesn't exist\n" % path_msg(tag_name))
            sys.exit(1)
        tag_file = b'refs/tags/%s' % tag_name
        git.delete_ref(tag_file)
        sys.exit(0)
    if not extra:
        # No positional arguments: list all tags, one per line.
        for t in tags:
            sys.stdout.flush()
            out = byte_stream(sys.stdout)
            out.write(t)
            out.write(b'\n')
        sys.exit(0)
    elif len(extra) != 2:
        o.fatal('expected commit ref and hash')
    tag_name, commit = map(argv_bytes, extra[:2])
    if not tag_name:
        o.fatal("tag name must not be empty.")
    debug1("args: tag name = %s; commit = %s\n"
           % (path_msg(tag_name), commit.decode('ascii')))
    if tag_name in tags and not opt.force:
        log("bup: error: tag '%s' already exists\n" % path_msg(tag_name))
        sys.exit(1)
    if tag_name.startswith(b'.'):
        o.fatal("'%s' is not a valid tag name." % path_msg(tag_name))
    try:
        hash = git.rev_parse(commit)
    except git.GitError as e:
        log("bup: error: %s" % e)
        sys.exit(2)
    if not hash:
        log("bup: error: commit %s not found.\n" % commit.decode('ascii'))
        sys.exit(2)
    # Make sure the commit object actually exists in the repo's packs
    # before creating a ref to it.
    with git.PackIdxList(git.repo(b'objects/pack')) as pL:
        if not pL.exists(hash):
            log("bup: error: commit %s not found.\n" % commit.decode('ascii'))
            sys.exit(2)
    git.update_ref(b'refs/tags/' + tag_name, hash, None, force=True)
def extract(file, restore_numeric_ids=False, create_symlinks=True):
    # Create every path in the metadata archive `file` and apply its
    # metadata immediately, except for directories, which are applied
    # last (deepest first) so that setting a directory's permissions
    # and times isn't clobbered by creating its children.
    # For now, just store all the directories and handle them last,
    # longest first.
    all_dirs = []
    for meta in _ArchiveIterator(file):
        if not meta:  # Hit end record.
            break
        xpath = _clean_up_extract_path(meta.path)
        if not xpath:
            add_error(Exception('skipping risky path "%s"'
                                % path_msg(meta.path)))
        else:
            meta.path = xpath
            if verbose:
                print('+', path_msg(meta.path), file=sys.stderr)
            _set_up_path(meta, create_symlinks=create_symlinks)
            if os.path.isdir(meta.path):
                all_dirs.append(meta)
            else:
                if verbose:
                    print('=', path_msg(meta.path), file=sys.stderr)
                meta.apply_to_path(restore_numeric_ids=restore_numeric_ids)
    # Longest paths first: finish children before their parents.
    all_dirs.sort(key = lambda x : len(x.path), reverse=True)
    for dir in all_dirs:
        # Don't need to check xpath -- won't be in all_dirs if not OK.
        xpath = _clean_up_extract_path(dir.path)
        if verbose:
            print('=', path_msg(xpath), file=sys.stderr)
        # Shouldn't have to check for risky paths here (omitted above).
        dir.apply_to_path(path=dir.path,
                          restore_numeric_ids=restore_numeric_ids)
def init_repo(path=None):
    """Create the Git bare repository for bup in a given path."""
    guess_repo(path)
    d = repo()  # appends a / to the path
    parent = os.path.dirname(os.path.dirname(d))
    if parent and not os.path.exists(parent):
        raise GitError('parent directory "%s" does not exist\n'
                       % path_msg(parent))
    if os.path.exists(d) and not os.path.isdir(os.path.join(d, b'.')):
        raise GitError('"%s" exists but is not a directory\n' % path_msg(d))
    p = subprocess.Popen([b'git', b'--bare', b'init'], stdout=sys.stderr,
                         env=_gitenv())
    _git_wait('git init', p)
    # Force the index version configuration in order to ensure bup works
    # regardless of the version of the installed Git binary.
    # Note: b'2', not '2' -- keep the entire argv as bytes like the
    # surrounding invocations.
    p = subprocess.Popen([b'git', b'config', b'pack.indexVersion', b'2'],
                         stdout=sys.stderr, env=_gitenv())
    _git_wait('git config', p)
    # Enable the reflog
    p = subprocess.Popen(
        [b'git', b'config', b'core.logAllRefUpdates', b'true'],
        stdout=sys.stderr, env=_gitenv())
    _git_wait('git config', p)
def _list_directory(self, path, resolution):
    """Helper to produce a directory listing.

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent.
    """
    # Canonicalize directory URLs so they always end with a slash.
    if not path.endswith(b'/') and len(path) > 0:
        print('Redirecting from %s to %s'
              % (path_msg(path), path_msg(path + b'/')))
        return self.redirect(path + b'/', permanent=True)
    # "hidden" query argument: last occurrence wins; non-integer
    # values are treated as false.
    hidden_arg = self.request.arguments.get('hidden', [0])[-1]
    try:
        show_hidden = int(hidden_arg)
    except ValueError:  # exception object was unused; don't bind it
        show_hidden = False
    self.render(
        'list-directory.html',
        path=path,
        breadcrumbs=_compute_breadcrumbs(path, show_hidden),
        files_hidden=_contains_hidden_files(self.repo, resolution[-1][1]),
        hidden_shown=show_hidden,
        dir_contents=_dir_contents(self.repo, resolution,
                                   show_hidden=show_hidden))
def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False):
    # Restore this entry's saved POSIX1e ACLs to path (access ACL and,
    # for directories, the default ACL when present).
    if not self.posix1e_acl:
        return
    if not apply_acl:  # platform support wasn't compiled in/available
        add_error("%s: can't restore ACLs; posix1e support missing.\n"
                  % path_msg(path))
        return
    try:
        acls = self.posix1e_acl
        # Saved ACLs alternate name-based and numeric-id forms; select
        # the variant matching restore_numeric_ids.
        offs = 1 if restore_numeric_ids else 0
        if len(acls) > 2:
            # Both an access ACL and a default (directory) ACL exist.
            apply_acl(path, acls[offs], acls[offs + 2])
        else:
            apply_acl(path, acls[offs])
    except IOError as e:
        if e.errno == errno.EINVAL:
            # libacl returns with errno set to EINVAL if a user
            # (or group) doesn't exist
            raise ApplyError("POSIX1e ACL: can't create %r for %r"
                             % (acls, path_msg(path)))
        elif e.errno == errno.EPERM or e.errno == errno.EOPNOTSUPP:
            # Not permitted/supported here: report, but don't abort the
            # whole restore.
            raise ApplyError('POSIX1e ACL applyto: %s' % e)
        else:
            raise
def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
    # Restore this entry's saved Linux extended attributes to path,
    # removing any existing xattrs not present in the saved metadata.
    if not xattr:  # xattr module unavailable on this platform
        if self.linux_xattr:
            add_error("%s: can't restore xattr; xattr support missing.\n"
                      % path_msg(path))
        return
    if not self.linux_xattr:
        return
    try:
        existing_xattrs = set(xattr.list(path, nofollow=True))
    except IOError as e:
        if e.errno == errno.EACCES:
            raise ApplyError('xattr.set %r: %s' % (path_msg(path), e))
        else:
            raise
    for k, v in self.linux_xattr:
        # Only write when the attribute is missing or differs, to avoid
        # needless churn.
        if k not in existing_xattrs \
           or v != xattr.get(path, k, nofollow=True):
            try:
                xattr.set(path, k, v, nofollow=True)
            except IOError as e:
                if e.errno == errno.EPERM \
                   or e.errno == errno.EOPNOTSUPP:
                    raise ApplyError('xattr.set %r: %s'
                                     % (path_msg(path), e))
                else:
                    raise
        existing_xattrs -= frozenset([k])
    # Anything left over wasn't in the saved metadata; drop it.
    for k in existing_xattrs:
        try:
            xattr.remove(path, k, nofollow=True)
        except IOError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                raise ApplyError('xattr.remove %r: %s'
                                 % (path_msg(path), e))
            else:
                raise
def check_midx(name):
    """Verify the midx file `name`: every hash in each component idx
    must be present in the midx, and the midx's hashes must be in
    ascending order.  Problems are reported via add_error()."""
    nicename = git.repo_rel(name)
    log('Checking %s.\n' % path_msg(nicename))
    try:
        ix = git.open_idx(name)
    except git.GitError as e:
        add_error('%s: %s' % (path_msg(name), e))
        return
    for count,subname in enumerate(ix.idxnames):
        sub = git.open_idx(os.path.join(os.path.dirname(name), subname))
        for ecount,e in enumerate(sub):
            if not (ecount % 1234):  # throttle progress output
                qprogress(' %d/%d: %s %d/%d\r'
                          % (count, len(ix.idxnames),
                             git.shorten_hash(subname).decode('ascii'),
                             ecount, len(sub)))
            if not sub.exists(e):
                add_error("%s: %s: %s missing from idx"
                          % (path_msg(nicename),
                             git.shorten_hash(subname).decode('ascii'),
                             hexstr(e)))
            if not ix.exists(e):
                add_error("%s: %s: %s missing from midx"
                          % (path_msg(nicename),
                             git.shorten_hash(subname).decode('ascii'),
                             hexstr(e)))
    prev = None
    for ecount,e in enumerate(ix):
        if not (ecount % 1234):
            qprogress(' Ordering: %d/%d\r' % (ecount, len(ix)))
        if e and prev and not e >= prev:
            # Use path_msg(nicename) like all the other messages above
            # (nicename is bytes).
            add_error('%s: ordering error: %s < %s'
                      % (path_msg(nicename), hexstr(e), hexstr(prev)))
        prev = e
def __init__(self, filename):
    # Parse a .midx (multi-pack index) file and set up offsets into the
    # mmap'd data.  On an invalid, too-old, or too-new file, record via
    # force_keep whether the file is worth keeping and bail out through
    # _init_failed().
    self.name = filename
    self.force_keep = False
    self.map = None
    assert(filename.endswith(b'.midx'))
    self.map = mmap_read(open(filename))
    if self.map[0:4] != b'MIDX':  # magic bytes
        log('Warning: skipping: invalid MIDX header in %r\n'
            % path_msg(filename))
        self.force_keep = True
        return self._init_failed()
    ver = struct.unpack('!I', self.map[4:8])[0]
    if ver < MIDX_VERSION:
        log('Warning: ignoring old-style (v%d) midx %r\n'
            % (ver, path_msg(filename)))
        self.force_keep = False  # old stuff is boring
        return self._init_failed()
    if ver > MIDX_VERSION:
        log('Warning: ignoring too-new (v%d) midx %r\n'
            % (ver, path_msg(filename)))
        self.force_keep = True  # new stuff is exciting
        return self._init_failed()
    # Layout: 12-byte header, fanout table, sha table, "which" table,
    # then NUL-separated component idx names.
    self.bits = _helpers.firstword(self.map[8:12])
    self.entries = 2**self.bits
    self.fanout_ofs = 12
    # fanout len is self.entries * 4
    self.sha_ofs = self.fanout_ofs + self.entries * 4
    self.nsha = self._fanget(self.entries - 1)
    # sha table len is self.nsha * 20
    self.which_ofs = self.sha_ofs + 20 * self.nsha
    # which len is self.nsha * 4
    self.idxnames = self.map[self.which_ofs + 4 * self.nsha:].split(b'\0')
def ruin_bloom(bloomfilename):
    """Deliberately corrupt the bloom filter at bloomfilename (used for
    testing repair paths) by zeroing its bit table."""
    rbloomfilename = git.repo_rel(bloomfilename)
    if not os.path.exists(bloomfilename):
        log(path_msg(bloomfilename) + '\n')
        add_error('bloom: %s not found to ruin\n' % path_msg(rbloomfilename))
        return
    b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
    try:
        # Zero the bit table (starts at offset 16, 2**bits bytes long).
        b.map[16:16 + 2**b.bits] = b'\0' * 2**b.bits
    finally:
        # Don't leak the filter's mmap/file handle.
        b.close()
def report_live_item(n, total, ref_name, ref_id, item, verbosity):
    # Report progress for the liveness scan; the amount of path detail
    # shown grows with verbosity.
    status = 'scanned %02.2f%%' % (n * 100.0 / total)
    hex_id = hexstr(ref_id)
    dirslash = b'/' if item.type == b'tree' else b''
    chunk_path = item.chunk_path
    if chunk_path:
        # Chunked-file internals are only shown at the highest verbosity.
        if verbosity < 4:
            return
        ps = b'/'.join(item.path)
        chunk_ps = b'/'.join(chunk_path)
        log('%s %s:%s/%s%s\n' % (status, hex_id, path_msg(ps),
                                 path_msg(chunk_ps), path_msg(dirslash)))
        return
    # Top commit, for example has none.
    demangled = git.demangle_name(item.path[-1], item.mode)[0] if item.path \
                else None
    # Don't print mangled paths unless the verbosity is over 3.
    if demangled:
        ps = b'/'.join(item.path[:-1] + [demangled])
        if verbosity == 1:
            qprogress('%s %s:%s%s\r' % (status, hex_id, path_msg(ps),
                                        path_msg(dirslash)))
        elif (verbosity > 1 and item.type == b'tree') \
             or (verbosity > 2 and item.type == b'blob'):
            log('%s %s:%s%s\n' % (status, hex_id, path_msg(ps),
                                  path_msg(dirslash)))
    elif verbosity > 3:
        ps = b'/'.join(item.path)
        log('%s %s:%s%s\n' % (status, hex_id, path_msg(ps),
                              path_msg(dirslash)))
def _clean(self):
    """Drop duplicate-named children from self.items (keeping the first
    occurrence of each name) and report each duplicate via add_error."""
    names_seen = set()
    items = []
    for item in self.items:
        if item.name in names_seen:
            # join() takes the iterable directly; no need for a
            # pass-through generator expression.
            parent_path = b'/'.join(self.namestack) + b'/'
            add_error('error: ignoring duplicate path %s in %s'
                      % (path_msg(item.name), path_msg(parent_path)))
        else:
            names_seen.add(item.name)
            items.append(item)
    self.items = items
def remove_stale_files(new_pack_prefix):
    # Delete the now-redundant pack files recorded in ns.stale_files,
    # sparing any file belonging to the pack we just wrote
    # (new_pack_prefix).  Relies on enclosing-scope state: ns,
    # verbosity, and cat_pipe.
    if verbosity and new_pack_prefix:
        log('created ' + path_msg(basename(new_pack_prefix)) + '\n')
    for p in ns.stale_files:
        if new_pack_prefix and p.startswith(new_pack_prefix):
            continue  # Don't remove the new pack file
        if verbosity:
            log('removing ' + path_msg(basename(p)) + '\n')
        os.unlink(p)
    if ns.stale_files:  # So git cat-pipe will close them
        cat_pipe.restart()
    ns.stale_files = []
def start_extract(file, create_symlinks=True):
    """First restore pass: create each path named in the metadata
    archive `file`, skipping entries whose cleaned-up path is unsafe."""
    for meta in _ArchiveIterator(file):
        if not meta:
            # End-of-archive record reached.
            break
        if verbose:
            print(path_msg(meta.path), file=sys.stderr)
        xpath = _clean_up_extract_path(meta.path)
        if not xpath:
            add_error(Exception('skipping risky path "%s"'
                                % path_msg(meta.path)))
            continue
        meta.path = xpath
        _set_up_path(meta, create_symlinks=create_symlinks)
def bup_rm(repo, paths, compression=6, verbosity=None):
    # Remove the branches and saves named by paths from the repository,
    # rewriting affected branches to omit the dead saves and only
    # touching refs at the very end.
    dead_branches, dead_saves = dead_items(repo, paths)
    die_if_errors('not proceeding with any removals\n')
    updated_refs = {}  # ref_name -> (original_ref, tip_commit(bin))
    for branchname, branchitem in dead_branches.items():
        ref = b'refs/heads/' + branchname
        assert(not ref in updated_refs)
        updated_refs[ref] = (branchitem.oid, None)  # None: delete this ref
    if dead_saves:
        # Rewrite each affected branch to drop its dead saves.
        writer = git.PackWriter(compression_level=compression)
        try:
            for branch, saves in dead_saves.items():
                assert(saves)
                updated_refs[b'refs/heads/' + branch] = rm_saves(saves, writer)
        except BaseException as ex:
            with pending_raise(ex):
                writer.abort()
        finally:
            writer.close()
    # Only update the refs here, at the very end, so that if something
    # goes wrong above, the old refs will be undisturbed.  Make an attempt
    # to update each ref.
    for ref_name, info in updated_refs.items():
        orig_ref, new_ref = info
        try:
            if not new_ref:
                git.delete_ref(ref_name, hexlify(orig_ref))
            else:
                git.update_ref(ref_name, new_ref, orig_ref)
                if verbosity:
                    log('updated %s (%s%s)\n'
                        % (path_msg(ref_name),
                           hexstr(orig_ref) + ' -> ' if orig_ref else '',
                           hexstr(new_ref)))
        except (git.GitError, ClientError) as ex:
            if new_ref:
                add_error('while trying to update %s (%s%s): %s'
                          % (path_msg(ref_name),
                             hexstr(orig_ref) + ' -> ' if orig_ref else '',
                             hexstr(new_ref), ex))
            else:
                add_error('while trying to delete %r (%s): %s'
                          % (ref_name, hexstr(orig_ref), ex))
def sync_indexes(self):
    # Mirror the server's pack-index list into the local cache dir:
    # download indexes the server asks us to load, and delete cached
    # indexes the server no longer knows about.
    self._require_command(b'list-indexes')
    self.check_busy()
    conn = self.conn
    mkdirp(self.cachedir)
    # All cached idxs are extra until proven otherwise
    extra = set()
    for f in os.listdir(self.cachedir):
        debug1(path_msg(f) + '\n')
        if f.endswith(b'.idx'):
            extra.add(f)
    needed = set()
    conn.write(b'list-indexes\n')
    for line in linereader(conn):
        if not line:
            break
        assert(line.find(b'/') < 0)  # index names never contain slashes
        parts = line.split(b' ')
        idx = parts[0]
        if len(parts) == 2 and parts[1] == b'load' and idx not in extra:
            # If the server requests that we load an idx and we don't
            # already have a copy of it, it is needed
            needed.add(idx)
        # Any idx that the server has heard of is proven not extra
        extra.discard(idx)
    self.check_ok()
    debug1('client: removing extra indexes: %s\n' % extra)
    for idx in extra:
        os.unlink(os.path.join(self.cachedir, idx))
    debug1('client: server requested load of: %s\n' % needed)
    for idx in needed:
        self.sync_index(idx)
    git.auto_midx(self.cachedir)
def check_index(reader):
    # Sanity-check an index: verify child-offset/flag invariants during
    # forward iteration, then verify ordering during normal iteration.
    try:
        log('check: checking forward iteration...\n')
        e = None
        d = {}
        for e in reader.forward_iter():
            if e.children_n:
                if opt.verbose:
                    log('%08x+%-4d %r\n' % (e.children_ofs, e.children_n,
                                            path_msg(e.name)))
                assert(e.children_ofs)
                assert e.name.endswith(b'/')
                # Each children_ofs must be unique across the index.
                assert(not d.get(e.children_ofs))
                d[e.children_ofs] = 1
            if e.flags & index.IX_HASHVALID:
                assert(e.sha != index.EMPTY_SHA)
                assert(e.gitmode)
        assert not e or bytes(e.name) == b'/'  # last entry is *always* /
        log('check: checking normal iteration...\n')
        last = None
        for e in reader:
            if last:
                # Normal iteration yields names in descending order.
                assert(last > e.name)
            last = e.name
    except:
        # Report the entry being examined when the failure occurred.
        log('index error! at %r\n' % e)
        raise
    log('check: passed.\n')
def main(argv):
    # Entry point for "bup damage": deliberately corrupt the named
    # files by overwriting random (or, with --equal, evenly spaced)
    # regions with random bytes.
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if not extra:
        o.fatal('filenames expected')
    # "is not None": 0 is a legitimate, falsy seed value.
    if opt.seed is not None:
        random.seed(opt.seed)  # makes the damage reproducible
    for name in extra:
        name = argv_bytes(name)
        log('Damaging "%s"...\n' % path_msg(name))
        with open(name, 'r+b') as f:
            st = os.fstat(f.fileno())
            size = st.st_size
            if opt.percent or opt.size:
                ms1 = int(float(opt.percent or 0) / 100.0 * size) or size
                ms2 = opt.size or size
                maxsize = min(ms1, ms2)
            else:
                maxsize = 1
            chunks = opt.num or 10
            chunksize = size // chunks
            for r in range(chunks):
                sz = random.randrange(1, maxsize + 1)
                if sz > size:
                    sz = size
                if opt.equal:
                    ofs = r * chunksize
                else:
                    ofs = random.randrange(0, size - sz + 1)
                log(' %6d bytes at %d\n' % (sz, ofs))
                f.seek(ofs)
                f.write(randblock(sz))
def sync_indexes(self):
    """Bring the local index cache in line with the server's list of
    pack indexes, fetching requested ones and pruning unknown ones."""
    mkdirp(self.cachedir)
    # Everything currently cached is presumed stale until the server
    # mentions it.
    stale = set()
    for cached in os.listdir(self.cachedir):
        debug1(path_msg(cached) + '\n')
        if cached.endswith(b'.idx'):
            stale.add(cached)
    wanted = set()
    for idx, load in self._list_indexes():
        if load:
            # If the server requests that we load an idx and we don't
            # already have a copy of it, it is needed
            wanted.add(idx)
        # Any idx that the server has heard of is proven not extra
        stale.discard(idx)
    debug1('client: removing extra indexes: %s\n' % stale)
    for idx in stale:
        os.unlink(os.path.join(self.cachedir, idx))
    debug1('client: server requested load of: %s\n' % wanted)
    for idx in wanted:
        self.sync_index(idx)
    git.auto_midx(self.cachedir)
def main(argv):
    # Entry point for "bup bloom": check, ruin (test helper), or
    # (re)generate the repository's bloom filter.
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if extra:
        o.fatal('no positional parameters expected')
    if not opt.check and opt.k and opt.k not in (4, 5):
        o.fatal('only k values of 4 and 5 are supported')
    if opt.check:
        opt.check = argv_bytes(opt.check)
    output = argv_bytes(opt.output) if opt.output else None
    if opt.dir:
        path = argv_bytes(opt.dir)
    else:
        path = LocalRepo().packdir()
    debug1('bloom: scanning %s\n' % path_msg(path))
    outfilename = output or os.path.join(path, b'bup.bloom')
    if opt.check:
        check_bloom(path, outfilename, opt.check)
    elif opt.ruin:
        ruin_bloom(outfilename)
    else:
        do_bloom(path, outfilename, opt.k, opt.force)
    if saved_errors:
        log('WARNING: %d errors encountered during bloom.\n'
            % len(saved_errors))
        sys.exit(1)
    elif opt.check:
        log('All tests passed.\n')
def main(argv):
    # Entry point for "bup midx": check existing midx files or generate
    # new ones (from explicit filenames, or automatically).
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    opt.dir = argv_bytes(opt.dir) if opt.dir else None
    opt.output = argv_bytes(opt.output) if opt.output else None
    if extra and (opt.auto or opt.force):
        o.fatal("you can't use -f/-a and also provide filenames")
    if opt.check and (not extra and not opt.auto):
        o.fatal("if using --check, you must provide filenames or -a")
    git.check_repo_or_die()
    if opt.max_files < 0:
        opt.max_files = max_files()
    assert (opt.max_files >= 5)
    extra = [argv_bytes(x) for x in extra]
    if opt.check:
        # check existing midx files
        if extra:
            midxes = extra
        else:
            path = opt.dir or git.repo(b'objects/pack')
            # path is bytes: format it via path_msg, consistent with the
            # debug output in the non-check branch below.
            debug1('midx: scanning %s\n' % path_msg(path))
            midxes = glob.glob(os.path.join(path, b'*.midx'))
        for name in midxes:
            check_midx(name)
        if not saved_errors:
            log('All tests passed.\n')
    else:
        if extra:
            sys.stdout.flush()
            do_midx(git.repo(b'objects/pack'), opt.output, extra, b'',
                    byte_stream(sys.stdout), auto=opt.auto, force=opt.force,
                    print_names=opt.print)
        elif opt.auto or opt.force:
            sys.stdout.flush()
            path = opt.dir or git.repo(b'objects/pack')
            debug1('midx: scanning %s\n' % path_msg(path))
            do_midx_dir(path, opt.output, byte_stream(sys.stdout),
                        auto=opt.auto, force=opt.force,
                        max_files=opt.max_files)
        else:
            o.fatal("you must use -f or -a or provide input filenames")
    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
def sync_index(self, name):
    """Download the pack index `name` from the server into the cache
    directory, writing it atomically.  Refuses to re-fetch an index
    that's already cached."""
    mkdirp(self.cachedir)
    target = os.path.join(self.cachedir, name)
    if os.path.exists(target):
        raise ClientError(
            "won't request existing .idx, try `bup bloom --check %s`"
            % path_msg(target))
    with atomically_replaced_file(target, 'wb') as f:
        self.send_index(name, f, lambda size: None)
def __init__(self, repo, create=False):
    """Open (optionally creating) the file-storage directory named by
    the repo's bup.path config option."""
    self.openset = set()
    self.path = repo.config(b'bup.path', opttype='path')
    if create:
        mkdirp(self.path)
    if not os.path.isdir(self.path):
        raise Exception("FileStorage: %s doesn't exist or isn't a "
                        "directory, need to init?" % path_msg(self.path))
def save_tree(output_file, paths, recurse=False, write_paths=True,
              save_symlinks=True, xdev=False):
    # Write a metadata record to output_file for each of paths
    # (optionally recursing into directories).
    # Issue top-level rewrite warnings.
    for path in paths:
        safe_path = _clean_up_path_for_archive(path)
        if safe_path != path:
            log('archiving "%s" as "%s"\n'
                % (path_msg(path), path_msg(safe_path)))
    if not recurse:
        for p in paths:
            safe_path = _clean_up_path_for_archive(p)
            st = xstat.lstat(p)
            if stat.S_ISDIR(st.st_mode):
                safe_path += b'/'
            m = from_path(p, statinfo=st, archive_path=safe_path,
                          save_symlinks=save_symlinks)
            if verbose:
                print(m.path, file=sys.stderr)
            m.write(output_file, include_path=write_paths)
    else:
        start_dir = os.getcwd()
        try:
            for (p, st) in recursive_dirlist(paths, xdev=xdev):
                # recursive_dirlist() changes the working directory as
                # it iterates; compute metadata from the original cwd,
                # then hop back so iteration can continue.
                dirlist_dir = os.getcwd()
                os.chdir(start_dir)
                safe_path = _clean_up_path_for_archive(p)
                m = from_path(p, statinfo=st, archive_path=safe_path,
                              save_symlinks=save_symlinks)
                if verbose:
                    print(m.path, file=sys.stderr)
                m.write(output_file, include_path=write_paths)
                os.chdir(dirlist_dir)
        finally:
            os.chdir(start_dir)
def _pop(force_tree, dir_metadata=None):
    # Leave the current archive directory and add its tree to its parent.
    # Uses enclosing-scope state: parts, shalists, metalists, and the
    # pack writer w.
    assert(len(parts) >= 1)
    part = parts.pop()
    shalist = shalists.pop()
    metalist = metalists.pop()
    # FIXME: only test if collision is possible (i.e. given --strip, etc.)?
    if force_tree:
        tree = force_tree
    else:
        names_seen = set()
        clean_list = []
        metaidx = 1  # entry at 0 is for the dir
        for x in shalist:
            name = x[1]
            if name in names_seen:
                parent_path = b'/'.join(parts) + b'/'
                add_error('error: ignoring duplicate path %s in %s'
                          % (path_msg(name), path_msg(parent_path)))
                # Keep metalist in step with clean_list: drop the
                # metadata for a discarded non-directory entry.
                if not stat.S_ISDIR(x[0]):
                    del metalist[metaidx]
            else:
                names_seen.add(name)
                clean_list.append(x)
                if not stat.S_ISDIR(x[0]):
                    metaidx += 1
        if dir_metadata:  # Override the original metadata pushed for this dir.
            metalist = [(b'', dir_metadata)] + metalist[1:]
        sorted_metalist = sorted(metalist, key = lambda x : x[0])
        metadata = b''.join([m[1].encode() for m in sorted_metalist])
        metadata_f = BytesIO(metadata)
        # Store the directory's metadata as a hidden .bupm entry.
        mode, id = hashsplit.split_to_blob_or_tree(w.new_blob, w.new_tree,
                                                   [metadata_f],
                                                   keep_boundaries=False)
        clean_list.append((mode, b'.bupm', id))
        tree = w.new_tree(clean_list)
    if shalists:
        shalists[-1].append((GIT_MODE_TREE,
                             git.mangle_name(part, GIT_MODE_TREE,
                                             GIT_MODE_TREE),
                             tree))
    return tree
def clear_index(indexfile, verbose):
    """Remove the index at indexfile along with its .meta and .hlink
    companion files; files that are already missing are ignored."""
    # Use a distinct loop variable instead of shadowing the parameter.
    for target in (indexfile, indexfile + b'.meta', indexfile + b'.hlink'):
        try:
            os.remove(target)
            if verbose:
                log('clear: removed %s\n' % path_msg(target))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
def _pop(force_tree=None, dir_metadata=None):
    # Leave the current archive directory and add its tree to its parent.
    # Uses enclosing-scope state: stack and the pack writer w.
    item = stack.pop()
    # FIXME: only test if collision is possible (i.e. given --strip, etc.)?
    if force_tree:
        tree = force_tree
    else:
        names_seen = set()
        clean_list = []
        for x in item.items:
            name = x.name
            if name in names_seen:
                parent_path = b'/'.join(x.name for x in stack) + b'/'
                add_error('error: ignoring duplicate path %s in %s'
                          % (path_msg(name), path_msg(parent_path)))
            else:
                names_seen.add(name)
                clean_list.append(x)
        # if set, overrides the original metadata pushed for this dir.
        if dir_metadata is None:
            dir_metadata = item.meta
        # Metadata records, sorted the same way git sorts tree entries;
        # the empty key sorts the directory's own record first.
        metalist = [(b'', dir_metadata)]
        metalist += [(git.shalist_item_sort_key((entry.mode, entry.name,
                                                 None)),
                      entry.meta)
                     for entry in clean_list if entry.mode != GIT_MODE_TREE]
        metalist.sort(key=lambda x: x[0])
        metadata = BytesIO(b''.join(m[1].encode() for m in metalist))
        # Store the directory's metadata as a hidden .bupm entry.
        mode, id = hashsplit.split_to_blob_or_tree(w.new_blob, w.new_tree,
                                                   [metadata],
                                                   keep_boundaries=False)
        shalist = [(mode, b'.bupm', id)]
        shalist += [(entry.gitmode,
                     git.mangle_name(entry.name, entry.mode, entry.gitmode),
                     entry.oid)
                    for entry in clean_list]
        tree = w.new_tree(shalist)
    if stack:
        stack[-1].append(item.name, GIT_MODE_TREE, GIT_MODE_TREE, tree, None)
    return tree
def _list_directory(self, path, resolution):
    """Helper to produce a directory listing.

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent.
    """
    # Canonicalize directory URLs so they always end with a slash.
    if not path.endswith(b'/') and len(path) > 0:
        print('Redirecting from %s to %s'
              % (path_msg(path), path_msg(path + b'/')))
        return self.redirect(path + b'/', permanent=True)
    args = QueryArgs.from_args(self.request.arguments)
    self.render('list-directory.html',
                path=path,
                breadcrumbs=_compute_breadcrumbs(path, args),
                files_hidden=_contains_hidden_files(self.repo,
                                                    resolution[-1][1]),
                args=args,
                dir_contents=_dir_contents(self.repo, resolution, args))
def open_idx(filename):
    """Open a pack index (.idx v1/v2) or midx file and return the
    matching reader object.

    Raises GitError for unsupported versions, unrecognized headers, or
    filenames with neither suffix.
    """
    if filename.endswith(b'.idx'):
        f = open(filename, 'rb')
        try:
            header = f.read(8)
            if header[0:4] == b'\377tOc':
                version = struct.unpack('!I', header[4:8])[0]
                if version == 2:
                    return PackIdxV2(filename, f)
                else:
                    raise GitError('%s: expected idx file version 2, got %d'
                                   % (path_msg(filename), version))
            elif len(header) == 8 and header[0:4] < b'\377tOc':
                # No magic: assume a v1 idx (its fanout starts immediately).
                return PackIdxV1(filename, f)
            else:
                raise GitError('%s: unrecognized idx file header'
                               % path_msg(filename))
        except BaseException:
            # Don't leak the file handle unless ownership was handed off
            # to a PackIdx object above.
            f.close()
            raise
    elif filename.endswith(b'.midx'):
        return midx.PackMidx(filename)
    else:
        raise GitError('idx filenames must end with .idx or .midx')
def clear_index(indexfile):
    """Remove the index at the repo-relative path indexfile, along with
    its .meta and .hlink companions; missing files are ignored."""
    # Use a distinct loop variable instead of shadowing the parameter.
    for name in (indexfile, indexfile + b'.meta', indexfile + b'.hlink'):
        path = git.repo(name)
        try:
            os.remove(path)
            if opt.verbose:
                log('clear: removed %s\n' % path_msg(path))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise