def getfilectx(repo, memctx, f):
    """Supply file contents to a memctx during changeset conversion.

    Standin files are replaced by the bfile hash recorded in
    ``bfiletohash``; ``.hgtags`` contents are rewritten so tag targets
    point at the converted nodes via ``revmap``.  Raises IOError when the
    file is absent from the source context, which signals to memctx that
    the file was removed or renamed.

    NOTE(review): reads ``ctx``, ``bfiletohash`` and ``revmap`` from an
    enclosing scope -- this is a closure lifted out of a conversion
    routine; confirm those names against the caller.
    """
    if bfutil.is_standin(f):
        # if the file isn't in the manifest then it was removed
        # or renamed, raise IOError to indicate this
        srcfname = bfutil.split_standin(f)
        try:
            fctx = ctx.filectx(srcfname)
        except error.LookupError:
            raise IOError()
        renamed = fctx.renamed()
        if renamed:
            # standin is always a bfile because bfileness
            # doesn't change after rename or copy
            renamed = bfutil.standin(renamed[0])

        return context.memfilectx(f, bfiletohash[srcfname],
                                  'l' in fctx.flags(), 'x' in fctx.flags(),
                                  renamed)
    else:
        try:
            fctx = ctx.filectx(f)
        except error.LookupError:
            raise IOError()
        renamed = fctx.renamed()
        if renamed:
            renamed = renamed[0]

        data = fctx.data()
        if f == '.hgtags':
            # Remap every tag target through revmap so the converted
            # .hgtags refers to converted changeset ids.
            newdata = []
            for line in data.splitlines():
                id, name = line.split(' ', 1)
                newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                                            name))
            data = ''.join(newdata)
        return context.memfilectx(f, data, 'l' in fctx.flags(),
                                  'x' in fctx.flags(), renamed)
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    """filemerge wrapper: resolve merges of bfile standins without
    invoking a textual merge tool.

    Returns None when the two standins are identical, 0 when the merge is
    resolved (automatically or after prompting the user), and defers to
    ``origfn`` for anything that is not a standin.
    """
    # Use better variable names here. Because this is a wrapper we cannot change
    # the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not bfutil.is_standin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        if fcancestor == fcother: # backwards, use working dir parent as ancestor
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (bfutil.split_standin(orig),
                              bfutil.split_standin(fcother.path()),
                              bfutil.split_standin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % bfutil.split_standin(fcdest.path()))

        # Other side unchanged since the ancestor: local wins silently.
        if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():
            return 0
        # Local side unchanged since the ancestor: take the other side.
        if fcancestor.path() != fcdest.path() and fcdest.data() == fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        # Both sides changed: ask the user which standin to keep
        # (old promptchoice API: prompt, choices tuple, default index).
        if repo.ui.promptchoice(_('bfile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?')
                                % bfutil.split_standin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher that skips bfiles and their standins.

    Wraps ``oldmatch`` and removes every file that either is a standin or
    whose standin is tracked in ``wctx`` (both names come from the
    enclosing scope).
    """
    matcher = copy.copy(oldmatch(repo, pats, opts, globbed, default))

    def is_plain_file(f):
        # True for files that are neither standins nor backed by a
        # standin tracked in wctx.
        if bfutil.is_standin(f):
            return False
        return bfutil.standin(f) not in wctx

    matcher._files = [f for f in matcher._files if is_plain_file(f)]
    matcher._fmap = set(matcher._files)
    inner_matchfn = matcher.matchfn
    matcher.matchfn = lambda f: inner_matchfn(f) and is_plain_file(f)
    return matcher
def commitctx(self, *args, **kwargs):
    """Commit through the parent class, then copy into the cache every
    bfile whose standin was touched by (and survived into) the new
    changeset.  Returns the new changeset node."""
    newnode = super(bfiles_repo, self).commitctx(*args, **kwargs)
    ctx = self[newnode]
    manifest = ctx.manifest()
    for fname in ctx.files():
        # Only standins that still exist in the commit are cached;
        # removed/renamed standins appear in files() but not the manifest.
        if not bfutil.is_standin(fname):
            continue
        if fname not in manifest:
            continue
        bfutil.copy_to_cache(self, ctx.node(), bfutil.split_standin(fname))
    return newnode
def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher that maps user patterns onto bfile standins.

    Strips the previously injected ``bfutil.short_name`` component from
    plain (kind-less) patterns, then rewrites the matcher so it matches
    only standins whose bfile appears in ``manifest`` (a name from the
    enclosing scope).
    """
    # The patterns were previously mangled to add .hgbfiles; undo that
    # for literal patterns that contain the short name.
    cleaned = []
    for pat in pats:
        if match_.patkind(pat) == None and bfutil.short_name in pat:
            cleaned.append(pat.replace(bfutil.short_name, ''))
        else:
            cleaned.append(pat)

    matcher = copy.copy(oldmatch(repo, cleaned, opts, globbed, default))

    def in_manifest(f):
        return bfutil.standin(f) in manifest

    matcher._files = [bfutil.standin(f) for f in matcher._files
                      if in_manifest(f)]
    matcher._fmap = set(matcher._files)
    base_matchfn = matcher.matchfn

    def matchfn(f):
        # Preserve the original "A and B and C or None" contract: return
        # the truthy match result, otherwise None (never False).
        if bfutil.is_standin(f):
            srcf = bfutil.split_standin(f)
            if in_manifest(srcf):
                return base_matchfn(srcf) or None
        return None

    matcher.matchfn = matchfn
    return matcher
def get_outgoing_bfiles(ui, repo, dest=None, **opts):
    """Return the set of standin files that would be pushed to *dest*.

    Returns None when the destination repository cannot be opened or when
    there are no outgoing changesets.  For merge changesets the changed
    file list is rebuilt from the manifests of both parents, since
    ctx.files() alone is incomplete there.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg
    try:
        remoteui = cmdutil.remoteui
    except AttributeError:
        remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = bfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # Merge changeset: collect files removed relative to either
            # parent and files whose content differs from either parent.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(set([f for f in files
                                       if bfutil.is_standin(f) and f in ctx]))
    return toupload
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    """Convert one changeset *ctx* from source repo *rsrc* into *rdst*.

    Parent nodes are remapped through *revmap*; standin files are replaced
    by the actual bfile contents looked up in the source cache.  The newly
    committed node is recorded as ``revmap[ctx.node()]`` and becomes the
    dirstate parent of *rdst*.
    """
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        # Merge changeset: rebuild the changed-file list from both
        # parent manifests, since ctx.files() alone is incomplete.
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        for f in mp1:
            if f not in mc:
                files.add(f)
        for f in mp2:
            if f not in mc:
                files.add(f)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        # Content callback for memctx; raises IOError for files removed
        # or renamed in this changeset.
        if bfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(bfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = bfutil.split_standin(renamed[0])

            # The standin stores the bfile's hash; fetch the real
            # contents from the source cache by that hash.
            hash = fctx.data().strip()
            path = bfutil.find_file(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            with open(path, 'rb') as fd:
                data = fd.read()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # Remap tag targets to the converted node ids.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                                                name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # The destination commit lists bfiles under their real names, not
    # their standin names.
    dstfiles = []
    for file in files:
        if bfutil.is_standin(file):
            dstfiles.append(bfutil.split_standin(file))
        else:
            dstfiles.append(file)
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                     prefix=None, mtime=None, subrepos=None):
    """archive wrapper: expand bfile standins to their cached contents.

    Mirrors Mercurial's archive() but, for every standin, writes the real
    bfile data (looked up in the cache by the hash stored in the standin)
    under the bfile's name.  Handles both the <=1.5 archiver signature
    (prefix passed to the archiver) and the >=1.6 one (prefix handled
    here) by probing with TypeError.
    """
    # No need to lock because we are only reading history and bfile caches
    # neither of which are modified
    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    # In Mercurial <= 1.5 the prefix is passed to the archiver so try that
    # if that doesn't work we are probably in Mercurial >= 1.6 where the
    # prefix is not handled by the archiver
    try:
        archiver = archival.archivers[kind](dest, prefix,
                                            mtime or ctx.date()[0])

        def write(name, mode, islink, getdata):
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            archiver.addfile(name, mode, islink, data)
    except TypeError:
        if kind == 'files':
            if prefix:
                raise util.Abort(_('cannot give prefix when archiving to files'))
        else:
            prefix = archival.tidyprefix(dest, kind, prefix)

        def write(name, mode, islink, getdata):
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            # >= 1.6: we must prepend the prefix ourselves.
            archiver.addfile(prefix + name, mode, islink, data)

        archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                # No global tag on this changeset: report the latest tag
                # and its distance via the template engine instead.
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if bfutil.is_standin(f):
            # Replace the standin's data callback with one that reads the
            # cached bfile contents; archive under the real file name.
            path = bfutil.find_file(repo, getdata().strip())
            ### TODO: What if the file is not cached?
            f = bfutil.split_standin(f)

            def getdatafn():
                # Closure over `path`; called immediately below, before
                # `path` is rebound on the next iteration.
                with open(path, 'rb') as fd:
                    return fd.read()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            # Subrepo archive() also changed signature across versions.
            try:
                sub.archive(repo.ui, archiver, prefix)
            except TypeError:
                sub.archive(archiver, prefix)

    archiver.done()
def matchfn(f):
    # Non-standins defer straight to the wrapped matcher.
    if not bfutil.is_standin(f):
        return orig_matchfn(f)
    # A standin matches when its bfile name matches and the standin is
    # present either in the working directory or in ctx.
    return (orig_matchfn(bfutil.split_standin(f))
            and (f in repo[None] or f in ctx))
def status(self, node1='.', node2=None, match=None, ignored=False,
           clean=False, unknown=False, subrepos=None):
    """status wrapper that reports bfiles under their real names.

    When ``self.bfstatus`` is False, behaves exactly like the superclass
    (probing for the pre-1.6 signature without *subrepos*).  Otherwise it
    computes normal-file status with a matcher rewritten to standins,
    computes bfile status from the bfile dirstate, and merges the two
    result tuples.  ``self.bfstatus`` is toggled off during the internal
    calls and restored before returning.

    NOTE(review): several references to module-level ``repo`` / ``ui``
    inside this method look like they should be ``self`` -- preserved
    as-is; confirm against the enclosing reposetup().
    """
    listignored, listclean, listunknown = ignored, clean, unknown
    if not self.bfstatus:
        try:
            return super(bfiles_repo, self).status(node1, node2, match,
                listignored, listclean, listunknown, subrepos)
        except TypeError:
            # Mercurial <= 1.5: status() has no subrepos argument.
            return super(bfiles_repo, self).status(node1, node2, match,
                listignored, listclean, listunknown)
    else:
        # some calls in this function rely on the old version of status
        self.bfstatus = False
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = repo[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = repo[node2]
        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']

        def inctx(file, ctx):
            # True when `file` exists in `ctx` (manifest lookup for the
            # working context, filectx lookup otherwise).
            try:
                if ctx.rev() is None:
                    return file in ctx.manifest()
                ctx[file]
                return True
            except:
                return False

        # create a copy of match that matches standins instead of bfiles
        # if matcher not set then it is the always matcher so overwrite that
        if match is None:
            match = match_.always(self.root, self.getcwd())

        def tostandin(file):
            if inctx(bfutil.standin(file), ctx2):
                return bfutil.standin(file)
            return file

        m = copy.copy(match)
        m._files = [tostandin(f) for f in m._files]

        # get ignored clean and unknown but remove them later if they were not asked for
        try:
            result = super(bfiles_repo, self).status(node1, node2, m,
                True, True, True, subrepos)
        except TypeError:
            result = super(bfiles_repo, self).status(node1, node2, m,
                True, True, True)
        if working:
            # Hold the wlock while we read bfiles and update the bfdirstate
            wlock = repo.wlock()
            try:
                # Any non bfiles that were explicitly listed must be taken out or
                # bfdirstate.status will report an error. The status of these files
                # was already computed using super's status.
                bfdirstate = bfutil.open_bfdirstate(ui, self)
                match._files = [f for f in match._files if f in bfdirstate]
                s = bfdirstate.status(match, [], listignored, listclean,
                                      listunknown)
                (unsure, modified, added, removed, missing, unknown,
                 ignored, clean) = s
                if parentworking:
                    # "unsure" bfiles must be hashed to decide whether
                    # they really changed against the parent's standin.
                    for bfile in unsure:
                        if ctx1[bfutil.standin(bfile)].data().strip() \
                                != bfutil.hashfile(self.wjoin(bfile)):
                            modified.append(bfile)
                        else:
                            clean.append(bfile)
                            bfdirstate.normal(bfutil.unixpath(bfile))
                    bfdirstate.write()
                else:
                    # Comparing against a non-parent revision: rehash all
                    # candidate bfiles against that revision's standins.
                    tocheck = unsure + modified + added + clean
                    modified, added, clean = [], [], []
                    for bfile in tocheck:
                        standin = bfutil.standin(bfile)
                        if inctx(standin, ctx1):
                            if ctx1[standin].data().strip() \
                                    != bfutil.hashfile(self.wjoin(bfile)):
                                modified.append(bfile)
                            else:
                                clean.append(bfile)
                        else:
                            added.append(bfile)
            finally:
                wlock.release()

            # bfiles tracked in ctx1 but absent from the bfile dirstate
            # were removed.
            for standin in ctx1.manifest():
                if not bfutil.is_standin(standin):
                    continue
                bfile = bfutil.split_standin(standin)
                if not match(bfile):
                    continue
                if bfile not in bfdirstate:
                    removed.append(bfile)
            # Handle unknown and ignored differently
            bfiles = (modified, added, removed, missing, [], [], clean)
            result = list(result)
            # Unknown files
            result[4] = [f for f in unknown
                         if repo.dirstate[f] == '?'
                         and not bfutil.is_standin(f)]
            # Ignored files must be ignored by both the dirstate and bfdirstate
            result[5] = set(ignored).intersection(set(result[5]))
            # combine normal files and bfiles
            normals = [[fn for fn in filelist
                        if not bfutil.is_standin(fn)]
                       for filelist in result]
            result = [sorted(list1 + list2)
                      for (list1, list2) in zip(normals, bfiles)]
        else:
            def toname(f):
                if bfutil.is_standin(f):
                    return bfutil.split_standin(f)
                return f
            result = [[toname(f) for f in items] for items in result]

        # Strip the categories the caller did not ask for (we requested
        # all three above so the merge logic had complete data).
        if not listunknown:
            result[4] = []
        if not listignored:
            result[5] = []
        if not listclean:
            result[6] = []
        self.bfstatus = True
        return result
def push(self, remote, force=False, revs=None, newbranch=False):
    """push wrapper: upload every referenced bfile before pushing.

    Collects the bfile hashes referenced by all outgoing changesets
    (rebuilding the changed-file list from both parent manifests for
    merges), uploads them via bfcommands.upload_bfiles, then delegates to
    the superclass push, probing for the pre-1.6 signature that lacks
    *newbranch*.
    """
    o = bfutil.findoutgoing(repo, remote, force)
    if o:
        toupload = set()
        o = repo.changelog.nodesbetween(o, revs)[0]
        for n in o:
            parents = [p for p in repo.changelog.parents(n)
                       if p != node.nullid]
            ctx = repo[n]
            files = set(ctx.files())
            if len(parents) == 2:
                # Merge changeset: ctx.files() is incomplete, so collect
                # removals and content changes against both parents.
                mc = ctx.manifest()
                mp1 = ctx.parents()[0].manifest()
                mp2 = ctx.parents()[1].manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            # Standin data is the bfile hash -- that is what gets uploaded.
            toupload = toupload.union(
                set([ctx[f].data().strip() for f in files
                     if bfutil.is_standin(f) and f in ctx]))
        bfcommands.upload_bfiles(ui, self, remote, toupload)
    # Mercurial >= 1.6 takes the newbranch argument, try that first.
    try:
        return super(bfiles_repo, self).push(remote, force, revs,
                                             newbranch)
    except TypeError:
        return super(bfiles_repo, self).push(remote, force, revs)
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """commit wrapper that refreshes bfile standins first.

    Case 1 (no explicit files or patterns): refresh every standin, drop
    dirstate entries for bfiles whose standins are gone, and commit
    everything.  Case 2 (explicit patterns): refresh only the matching
    bfiles and rewrite the matcher so the underlying commit() sees
    standins instead of bfile names.

    The signature matches localrepo.commit.  ``extra`` keeps its
    historical mutable default: it is only passed through, never mutated
    here, so changing it would alter the public interface for no gain.
    """
    orig = super(bfiles_repo, self).commit

    wlock = repo.wlock()
    try:
        # Case 1: user calls commit with no specific files or
        # include/exclude patterns: refresh and commit everything.
        if (match is None) or (not match.anypats() and not match.files()):
            bfiles = bfutil.list_bfiles(self)
            bfdirstate = bfutil.open_bfdirstate(ui, self)
            # this only loops through bfiles that exist (not removed/renamed)
            for bfile in bfiles:
                if os.path.exists(self.wjoin(bfutil.standin(bfile))):
                    bfutil.update_standin(self, bfutil.standin(bfile))
                    bfdirstate.normal(bfutil.unixpath(bfile))
            # Forget bfiles whose standin has disappeared from the
            # working directory (removed/renamed).
            # NOTE(review): this forgets entries while iterating the
            # bfdirstate -- appears to work with this dirstate
            # implementation, but verify before porting.
            for bfile in bfdirstate:
                if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                    bfdirstate.forget(bfutil.unixpath(bfile))
            bfdirstate.write()

            return orig(text=text, user=user, date=date, match=match,
                        force=force, editor=editor, extra=extra)

        for file in match.files():
            if bfutil.is_standin(file):
                raise util.Abort("Don't commit bfile standin. Commit bfile.")

        # Case 2: user calls commit with specified patterns: refresh any
        # matching big files.
        smatcher = bfutil.compose_standin_matcher(self, match)
        standins = bfutil.dirstate_walk(self.dirstate, smatcher)

        # No matching big files: get out of the way and pass control to
        # the usual commit() method.
        if not standins:
            return orig(text=text, user=user, date=date, match=match,
                        force=force, editor=editor, extra=extra)

        # Refresh all matching big files. It's possible that the commit
        # will end up failing, in which case the big files will stay
        # refreshed. No harm done: the user modified them and asked to
        # commit them, so sooner or later we're going to refresh the
        # standins. Might as well leave them refreshed.
        bfdirstate = bfutil.open_bfdirstate(ui, self)
        for standin in standins:
            bfile = bfutil.split_standin(standin)
            # BUGFIX: was `is not 'r'` -- identity comparison against a
            # string literal depends on CPython interning and is not a
            # reliable equality test; use != instead.
            if bfdirstate[bfile] != 'r':
                bfutil.update_standin(self, standin)
                bfdirstate.normal(bfutil.unixpath(bfile))
            else:
                bfdirstate.forget(bfutil.unixpath(bfile))
        bfdirstate.write()

        # Cook up a new matcher that only matches regular files or
        # standins corresponding to the big files requested by the user.
        # Have to modify _files to prevent commit() from complaining
        # "not tracked" for big files.
        bfiles = bfutil.list_bfiles(repo)
        match = copy.copy(match)
        orig_matchfn = match.matchfn

        # Check both the list of bfiles and the list of standins because
        # if a bfile was removed, it won't be in the list of bfiles at
        # this point
        match._files += sorted(standins)

        actualfiles = []
        for f in match._files:
            fstandin = bfutil.standin(f)

            # Ignore known bfiles and standins
            if f in bfiles or fstandin in standins:
                continue

            # Append directory separator to avoid collisions
            if not fstandin.endswith(os.sep):
                fstandin += os.sep

            # Prevalidate matching standin directories
            if any(st for st in match._files if st.startswith(fstandin)):
                continue
            actualfiles.append(f)
        match._files = actualfiles

        def matchfn(f):
            # Regular files match unless they are bfiles; bfiles are
            # represented by their standins instead.
            if orig_matchfn(f):
                return f not in bfiles
            else:
                return f in standins

        match.matchfn = matchfn
        return orig(text=text, user=user, date=date, match=match,
                    force=force, editor=editor, extra=extra)
    finally:
        wlock.release()
def toname(f):
    """Map a standin path back to its bfile name; leave others untouched."""
    return bfutil.split_standin(f) if bfutil.is_standin(f) else f