def override_update(orig, ui, repo, *pats, **opts):
    """Wrap 'hg update': refresh standins from changed bfiles before the
    real update runs, and honour --check against bfile modifications.

    orig is the wrapped update command; its return value is passed through.
    Raises util.Abort when --check is given and uncommitted bfile changes
    exist.
    """
    bfdirstate = bfutil.open_bfdirstate(ui, repo)
    # Full-status walk of the bfile dirstate (no ignored/clean/unknown).
    s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their bfiles
    # getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            # 'unsure' entries need a content hash to decide modified vs clean.
            for bfile in unsure:
                standin = bfutil.standin(bfile)
                if repo['.'][standin].data().strip() != \
                        bfutil.hashfile(repo.wjoin(bfile)):
                    mod = True
                else:
                    # Confirmed clean: record it so future walks are cheap.
                    bfdirstate.normal(bfutil.unixpath(bfile))
            bfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            # Propagate working-copy bfile content into the standins so the
            # underlying update sees current hashes.
            for bfile in unsure + modified + added:
                bfutil.update_standin(repo, bfutil.standin(bfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Replacement for cmdutil.match used while a command runs against
    bfiles: strips the bfiles store prefix from plain patterns and rewrites
    the matcher so it matches standins instead of bfile names.

    Signature mirrors the original cmdutil.match (including its mutable
    defaults, which are never mutated here).
    """
    newpats = []
    # The patterns were previously mangled to add .hgbfiles, we need to
    # remove that now
    for pat in pats:
        # Fix: compare to None with 'is', not '==' (PEP 8; patkind returns
        # None for plain relative paths).
        if match_.patkind(pat) is None and bfutil.short_name in pat:
            newpats.append(pat.replace(bfutil.short_name, ''))
        else:
            newpats.append(pat)
    match = oldmatch(repo, newpats, opts, globbed, default)
    m = copy.copy(match)
    # 'manifest' is a closure variable: the working-context manifest.
    bfile = lambda f: bfutil.standin(f) in manifest
    # Keep only files whose standin is tracked, and match on the standin name.
    m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
    m._fmap = set(m._files)
    orig_matchfn = m.matchfn
    # A path matches only if it is a standin of a tracked bfile that the
    # original matcher would have accepted.
    m.matchfn = lambda f: bfutil.is_standin(f) and \
        bfile(bfutil.split_standin(f)) and \
        orig_matchfn(bfutil.split_standin(f)) or None
    return m
def getfilectx(repo, memctx, f):
    """memctx callback producing the file context for f in the converted
    commit: standins are synthesized from bfiletohash, normal files are
    copied from the source changectx (with .hgtags ids mapped through
    revmap). Raises IOError when f is absent from the manifest, signalling
    removal/rename to memctx.
    """
    if bfutil.is_standin(f):
        # if the file isn't in the manifest then it was removed
        # or renamed, raise IOError to indicate this
        srcfname = bfutil.split_standin(f)
        try:
            srcfctx = ctx.filectx(srcfname)
        except error.LookupError:
            raise IOError()
        copysrc = srcfctx.renamed()
        if copysrc:
            # standin is always a bfile because bfileness
            # doesn't change after rename or copy
            copysrc = bfutil.standin(copysrc[0])
        return context.memfilectx(f, bfiletohash[srcfname],
                                  'l' in srcfctx.flags(),
                                  'x' in srcfctx.flags(), copysrc)

    try:
        srcfctx = ctx.filectx(f)
    except error.LookupError:
        raise IOError()
    copysrc = srcfctx.renamed()
    if copysrc:
        copysrc = copysrc[0]

    data = srcfctx.data()
    if f == '.hgtags':
        # Rewrite each tag target node through revmap.
        rewritten = []
        for line in data.splitlines():
            id, name = line.split(' ', 1)
            rewritten.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                                          name))
        data = ''.join(rewritten)
    return context.memfilectx(f, data, 'l' in srcfctx.flags(),
                              'x' in srcfctx.flags(), copysrc)
def override_forget(orig, ui, repo, *pats, **opts):
    """Wrap 'hg forget': run the normal forget on non-bfiles, then forget
    matching bfiles via the bfile dirstate and remove their standins.
    """
    wctx = repo[None].manifest()
    oldmatch = cmdutil.match

    def override_match(repo, pats=[], opts={}, globbed=False,
            default='relpath'):
        # Restrict the matcher to non-bfiles so orig only touches those.
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in wctx
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)
        return m

    # Fix: restore the monkeypatched cmdutil.match even if orig raises;
    # previously an exception left cmdutil.match permanently patched.
    cmdutil.match = override_match
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch

    m = cmdutil.match(repo, pats, opts)
    try:
        repo.bfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.bfstatus = False
    # modified + added + missing + clean, restricted to tracked bfiles.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if bfutil.standin(f) in wctx]

    for f in forget:
        if bfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(bfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for f in forget:
            bfdirstate.remove(bfutil.unixpath(f))
        bfdirstate.write()
        bfutil.repo_remove(repo, [bfutil.standin(f) for f in forget],
            unlink=True)
    finally:
        wlock.release()
def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Wrap the saved matcher so bfiles (files with a tracked standin) and
    standins themselves are filtered out, leaving only normal files."""
    wrapped = copy.copy(oldmatch(repo, pats, opts, globbed, default))

    def is_normal(f):
        # Normal == neither a standin nor a file whose standin is tracked
        # in the working context.
        return not bfutil.is_standin(f) and bfutil.standin(f) not in wctx

    wrapped._files = [f for f in wrapped._files if is_normal(f)]
    wrapped._fmap = set(wrapped._files)
    base_matchfn = wrapped.matchfn
    wrapped.matchfn = lambda f: base_matchfn(f) and is_normal(f)
    return wrapped
def getfilectx(repo, memctx, f):
    """memctx callback for converting a bfiles repo back to a normal one:
    files listed as standins get their real content read from the bfile
    store; other files are copied from the source changectx (mapping
    .hgtags ids through revmap). Raises IOError when f is absent from the
    manifest (removed/renamed).
    """
    if bfutil.standin(f) in files:
        # if the file isn't in the manifest then it was removed
        # or renamed, raise IOError to indicate this
        try:
            fctx = ctx.filectx(bfutil.standin(f))
        except error.LookupError:
            raise IOError()
        renamed = fctx.renamed()
        if renamed:
            renamed = bfutil.splitstandin(renamed[0])
        hash = fctx.data().strip()
        path = bfutil.findfile(rsrc, hash)
        ### TODO: What if the file is not cached?
        # Fix: use a context manager instead of the manual fd/None sentinel
        # with try/finally — same behavior, no leak, no sentinel dance.
        with open(path, 'rb') as fd:
            data = fd.read()
        return context.memfilectx(f, data, 'l' in fctx.flags(),
            'x' in fctx.flags(), renamed)
    else:
        try:
            fctx = ctx.filectx(f)
        except error.LookupError:
            raise IOError()
        renamed = fctx.renamed()
        if renamed:
            renamed = renamed[0]
        data = fctx.data()
        if f == '.hgtags':
            # Rewrite tag targets through revmap.
            newdata = []
            for line in data.splitlines():
                id, name = line.split(' ', 1)
                newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                    name))
            data = ''.join(newdata)
        return context.memfilectx(f, data, 'l' in fctx.flags(),
            'x' in fctx.flags(), renamed)
def override_revert(orig, ui, repo, *pats, **opts):
    """Wrap 'hg revert': temporarily refresh standins of modified bfiles so
    revert sees current hashes, run the real revert with a matcher rewritten
    to standins, then restore bfiles and put the standins back.
    """
    # Because we put the standins in a bad state (by updating them) and then
    # return them to a correct state we need to lock to prevent others from
    # changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            bfutil.bfdirstate_status(bfdirstate, repo, repo['.'].rev())
        # Make standins reflect current working-copy bfile content.
        for bfile in modified:
            bfutil.update_standin(repo, bfutil.standin(bfile))

        oldmatch = cmdutil.match
        try:
            # The revision being reverted to (None -> working parent).
            ctx = repo[opts.get('rev')]

            def override_match(repo, pats=[], opts={}, globbed=False,
                    default='relpath'):
                # Rewrite bfile names in the pattern list to their standins.
                match = oldmatch(repo, pats, opts, globbed, default)
                m = copy.copy(match)

                def tostandin(f):
                    if bfutil.standin(f) in repo[None] or \
                            bfutil.standin(f) in ctx:
                        return bfutil.standin(f)
                    return f

                m._files = [tostandin(f) for f in m._files]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn

                def matchfn(f):
                    # Standins match via their bfile name; only consider
                    # ones that exist in the working dir or target rev.
                    if bfutil.is_standin(f):
                        return orig_matchfn(bfutil.split_standin(f)) and \
                            (f in repo[None] or f in ctx)
                    return orig_matchfn(f)

                m.matchfn = matchfn
                return m

            cmdutil.match = override_match
            orig(ui, repo, *pats, **opts)
        finally:
            # Always un-monkeypatch cmdutil.match.
            cmdutil.match = oldmatch
        # Bring bfile contents back in sync with the reverted standins.
        bfcommands.revert_bfiles(ui, repo)
        # Restore standins of files we touched to their committed hashes.
        for bfile in modified:
            if os.path.exists(repo.wjoin(bfutil.standin(bfile))) and \
                    bfile in repo['.']:
                bfutil.write_standin(repo, bfutil.standin(bfile),
                    repo['.'][bfile].data().strip(),
                    'x' in repo['.'][bfile].flags())
    finally:
        wlock.release()
def getfilectx(repo, memctx, f):
    """memctx callback: for files tracked as standins, read the real bfile
    content from the store; otherwise copy from the source changectx,
    rewriting .hgtags node ids through revmap. Raises IOError when f is
    missing from the manifest (removed/renamed).
    """
    if bfutil.standin(f) not in files:
        # Plain file: copy straight from the source context.
        try:
            srcfctx = ctx.filectx(f)
        except error.LookupError:
            raise IOError()
        copysrc = srcfctx.renamed()
        if copysrc:
            copysrc = copysrc[0]
        payload = srcfctx.data()
        if f == '.hgtags':
            remapped = []
            for tagline in payload.splitlines():
                id, name = tagline.split(' ', 1)
                remapped.append('%s %s\n'
                                % (node.hex(revmap[node.bin(id)]), name))
            payload = ''.join(remapped)
        return context.memfilectx(f, payload, 'l' in srcfctx.flags(),
                                  'x' in srcfctx.flags(), copysrc)

    # Standin: resolve the stored hash to the cached bfile and inline it.
    # if the file isn't in the manifest then it was removed or renamed,
    # raise IOError to indicate this
    try:
        srcfctx = ctx.filectx(bfutil.standin(f))
    except error.LookupError:
        raise IOError()
    copysrc = srcfctx.renamed()
    if copysrc:
        copysrc = bfutil.split_standin(copysrc[0])
    hash = srcfctx.data().strip()
    path = bfutil.find_file(rsrc, hash)
    ### TODO: What if the file is not cached?
    with open(path, 'rb') as fd:
        payload = fd.read()
    return context.memfilectx(f, payload, 'l' in srcfctx.flags(),
                              'x' in srcfctx.flags(), copysrc)
def _bfconvert_addchangeset(rsrc, rdst, ctx, revmap, bfiles, normalfiles,
        matcher, size, bfiletohash):
    """Convert one source changeset ctx into rdst, classifying each touched
    file as bfile or normal and committing standins for bfiles.

    Mutates bfiles/normalfiles (classification caches), bfiletohash
    (bfile -> content hash) and revmap (src node -> dst node).
    """
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        # Merge commit: ctx.files() is unreliable, diff both parents'
        # manifests against this one.
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        for f in mp1:
            if f not in mc:
                files.add(f)
        for f in mp2:
            if f not in mc:
                files.add(f)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in bfiles and f not in normalfiles:
            # First time we see this path: classify it.
            isbfile = _is_bfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the bfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedbfile = renamed and renamed[0] in bfiles
                isbfile |= renamedbfile
                if 'l' in fctx.flags():
                    if renamedbfile:
                        raise util.Abort(
                            _('Renamed/copied bfile %s becomes symlink') % f)
                    isbfile = False
            if isbfile:
                bfiles.add(f)
            else:
                normalfiles.add(f)

        if f in bfiles:
            dstfiles.append(bfutil.standin(f))
            # bfile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                # NOTE(review): 'renamed' here may be unbound or stale from a
                # previous loop iteration when f was already classified —
                # confirm against upstream before relying on this branch.
                if 'l' in ctx.filectx(f).flags():
                    if renamed and renamed[0] in bfiles:
                        raise util.Abort(_('bfile %s becomes symlink') % f)

                # bfile was modified, update standins
                fullpath = rdst.wjoin(f)
                bfutil.create_dir(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in bfiletohash or bfiletohash[f] != hash:
                    # Materialize the bfile in the destination working dir
                    # and write a fresh standin for it.
                    with open(fullpath, 'wb') as fd:
                        fd.write(ctx[f].data())
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, bfutil.get_mode(executable))
                    bfutil.write_standin(rdst, bfutil.standin(f), hash,
                        executable)
                    bfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: standins come from bfiletohash, normal files
        # from the source context.
        if bfutil.is_standin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = bfutil.split_standin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a bfile because bfileness
                # doesn't change after rename or copy
                renamed = bfutil.standin(renamed[0])
            return context.memfilectx(f, bfiletohash[srcfname],
                'l' in fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n'
                        % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
        getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
def updatebfiles(ui, repo):
    """Bring working-copy bfiles in sync with their standins: copy cached
    content in, fetch missing hashes from the store, fix modes, and drop
    bfiles whose standins are gone.
    """
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.openbfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
            False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s

        bfiles = bfutil.listbfiles(repo)
        toget = []        # (bfile, hash) pairs that must come from the store
        at = 0
        updated = 0
        removed = 0
        printed = False
        if bfiles:
            ui.status(_('getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            # Standin gone but bfile present: the bfile was removed.
            if os.path.exists(repo.wjoin(bfile)) and not \
                    os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(repo.wjoin(bfile)) or expectedhash != \
                    bfutil.hashfile(repo.wjoin(bfile)):
                # Content is missing or stale: restore from the local cache
                # if possible, otherwise queue a store fetch.
                path = bfutil.findfile(repo, expectedhash)
                if not path:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != \
                    os.stat(repo.wjoin(bfile)).st_mode:
                # Content is fine but the mode drifted from the standin's.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._openstore(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [],[]

        # Fetched files still need the standin's mode applied.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        # Purge dirstate entries for bfiles no longer tracked.
        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    path = bfutil.unixpath(bfile)
                    try:
                        # Mercurial >= 1.9
                        bfdirstate.drop(path)
                    except AttributeError:
                        # Mercurial <= 1.8
                        bfdirstate.forget(path)
        bfdirstate.write()
        if printed:
            ui.status(_('%d big files updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
def revertbfiles(ui, repo, filelist=None):
    """Restore bfile contents/modes to match their standins after a revert.

    filelist: optional iterable restricting which bfiles to touch
    (None means all). Also reconciles the bfile dirstate with the
    repository dirstate of the standins.
    """
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.openbfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
            False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s

        bfiles = bfutil.listbfiles(repo)
        toget = []        # (bfile, hash) pairs that must come from the store
        at = 0
        updated = 0
        for bfile in bfiles:
            if filelist is None or bfile in filelist:
                if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                    # Standin gone: the revert removed this bfile.
                    bfdirstate.remove(bfile)
                    continue
                # revert saved a .orig standin; mirror that for the bfile.
                if os.path.exists(repo.wjoin(bfutil.standin(os.path.join(bfile\
                        + '.orig')))):
                    shutil.copyfile(repo.wjoin(bfile), repo.wjoin(bfile + \
                        '.orig'))
                at += 1
                expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
                mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
                if not os.path.exists(repo.wjoin(bfile)) or expectedhash != \
                        bfutil.hashfile(repo.wjoin(bfile)):
                    # Content missing/stale: restore from cache or queue a
                    # store fetch.
                    path = bfutil.findfile(repo, expectedhash)
                    if path is None:
                        toget.append((bfile, expectedhash))
                    else:
                        util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                        shutil.copy(path, repo.wjoin(bfile))
                        os.chmod(repo.wjoin(bfile), mode)
                        updated += 1
                        # Classify the dirstate entry against the parent rev.
                        if bfutil.standin(bfile) not in repo['.']:
                            bfdirstate.add(bfutil.unixpath(bfile))
                        elif expectedhash == repo['.'][bfutil.standin(bfile)] \
                                .data().strip():
                            bfdirstate.normal(bfutil.unixpath(bfile))
                        else:
                            bfutil.dirstate_normaldirty(bfdirstate,
                                bfutil.unixpath(bfile))
                elif os.path.exists(repo.wjoin(bfile)) and mode != \
                        os.stat(repo.wjoin(bfile)).st_mode:
                    # Only the mode drifted.
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == \
                            repo['.'][bfutil.standin(bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                            bfutil.unixpath(bfile))

        if toget:
            store = basestore._openstore(repo)
            success, missing = store.get(toget)
        else:
            success, missing = [], []

        # Fetched files: apply the standin's mode and classify the entry.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                    bfutil.unixpath(filename))

        removed = 0
        # Second pass: reconcile bfile dirstate with the standins' dirstate.
        for bfile in bfdirstate:
            if filelist is None or bfile in filelist:
                if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                    if os.path.exists(repo.wjoin(bfile)):
                        os.unlink(repo.wjoin(bfile))
                        removed += 1
                    if bfutil.standin(bfile) in repo['.']:
                        bfdirstate.remove(bfutil.unixpath(bfile))
                    else:
                        bfdirstate.forget(bfutil.unixpath(bfile))
                else:
                    # Mirror the standin's dirstate state onto the bfile.
                    state = repo.dirstate[bfutil.standin(bfile)]
                    if state == 'n':
                        bfdirstate.normal(bfile)
                    elif state == 'r':
                        bfdirstate.remove(bfile)
                    elif state == 'a':
                        bfdirstate.add(bfile)
                    elif state == '?':
                        try:
                            # Mercurial >= 1.9
                            bfdirstate.drop(bfile)
                        except AttributeError:
                            # Mercurial <= 1.8
                            bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()
def _bfconvert_addchangeset(rsrc, rdst, ctx, revmap, bfiles, normalfiles,
        matcher, size, bfiletohash):
    """Convert one source changeset ctx into rdst, classifying each touched
    file as bfile or normal and committing standins for bfiles.

    Mutates bfiles/normalfiles (classification caches), bfiletohash
    (bfile -> content hash) and revmap (src node -> dst node).
    """
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        # Merge commit: ctx.files() is unreliable, diff both parents'
        # manifests against this one.
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        for f in mp1:
            if f not in mc:
                files.add(f)
        for f in mp2:
            if f not in mc:
                files.add(f)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in bfiles and f not in normalfiles:
            # First time we see this path: classify it.
            isbfile = _isbfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the bfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedbfile = renamed and renamed[0] in bfiles
                isbfile |= renamedbfile
                if 'l' in fctx.flags():
                    if renamedbfile:
                        raise util.Abort(
                            _('Renamed/copied bfile %s becomes symlink') % f)
                    isbfile = False
            if isbfile:
                bfiles.add(f)
            else:
                normalfiles.add(f)

        if f in bfiles:
            dstfiles.append(bfutil.standin(f))
            # bfile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                # NOTE(review): 'renamed' may be unbound or stale from an
                # earlier iteration when f was already classified — confirm
                # against upstream before relying on this branch.
                if 'l' in ctx.filectx(f).flags():
                    if renamed and renamed[0] in bfiles:
                        raise util.Abort(_('bfile %s becomes symlink') % f)

                # bfile was modified, update standins
                fullpath = rdst.wjoin(f)
                bfutil.createdir(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in bfiletohash or bfiletohash[f] != hash:
                    # Fix: the old code did `try: fd = open(...)` with
                    # `finally: if fd: fd.close()` — if open() raised, the
                    # finally clause hit an unbound 'fd' (NameError masking
                    # the real error). A context manager is correct.
                    with open(fullpath, 'wb') as fd:
                        fd.write(ctx[f].data())
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, bfutil.getmode(executable))
                    bfutil.writestandin(rdst, bfutil.standin(f), hash,
                        executable)
                    bfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: standins come from bfiletohash, normal files
        # from the source context.
        if bfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = bfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a bfile because bfileness
                # doesn't change after rename or copy
                renamed = bfutil.standin(renamed[0])
            return context.memfilectx(f, bfiletohash[srcfname],
                'l' in fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n'
                        % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
        getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
def status(self, node1='.', node2=None, match=None, ignored=False,
        clean=False, unknown=False, subrepos=None):
    """bfiles-aware repo.status: when self.bfstatus is set, compute the
    normal status with a matcher rewritten to standins, then merge in
    bfile status from the bfile dirstate; otherwise defer to super().

    The TypeError fallbacks support older Mercurial status() signatures
    without the subrepos argument.
    """
    listignored, listclean, listunknown = ignored, clean, unknown
    if not self.bfstatus:
        try:
            return super(bfiles_repo, self).status(node1, node2, match,
                listignored, listclean, listunknown, subrepos)
        except TypeError:
            return super(bfiles_repo, self).status(node1, node2, match,
                listignored, listclean, listunknown)
    else:
        # some calls in this function rely on the old version of status
        self.bfstatus = False
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = repo[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = repo[node2]
        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']

        def inctx(file, ctx):
            # True when 'file' exists in ctx (manifest lookup for the
            # working context, filectx probe otherwise).
            try:
                if ctx.rev() is None:
                    return file in ctx.manifest()
                ctx[file]
                return True
            except:
                return False

        # create a copy of match that matches standins instead of bfiles
        # if matcher not set then it is the always matcher so overwrite that
        if match is None:
            match = match_.always(self.root, self.getcwd())

        def tostandin(file):
            if inctx(bfutil.standin(file), ctx2):
                return bfutil.standin(file)
            return file

        m = copy.copy(match)
        m._files = [tostandin(f) for f in m._files]

        # get ignored clean and unknown but remove them later if they were
        # not asked for
        try:
            result = super(bfiles_repo, self).status(node1, node2, m, True,
                True, True, subrepos)
        except TypeError:
            result = super(bfiles_repo, self).status(node1, node2, m, True,
                True, True)
        if working:
            # Hold the wlock while we read bfiles and update the bfdirstate
            wlock = repo.wlock()
            try:
                # Any non bfiles that were explicitly listed must be taken
                # out or bfdirstate.status will report an error. The status
                # of these files was already computed using super's status.
                bfdirstate = bfutil.open_bfdirstate(ui, self)
                match._files = [f for f in match._files if f in bfdirstate]
                s = bfdirstate.status(match, [], listignored, listclean,
                    listunknown)
                (unsure, modified, added, removed, missing, unknown, ignored,
                    clean) = s
                if parentworking:
                    # 'unsure' entries need a content hash against parent.
                    for bfile in unsure:
                        if ctx1[bfutil.standin(bfile)].data().strip() != \
                                bfutil.hashfile(self.wjoin(bfile)):
                            modified.append(bfile)
                        else:
                            clean.append(bfile)
                            bfdirstate.normal(bfutil.unixpath(bfile))
                    bfdirstate.write()
                else:
                    # Diffing against an arbitrary rev: re-derive modified/
                    # added/clean by comparing hashes against ctx1.
                    tocheck = unsure + modified + added + clean
                    modified, added, clean = [], [], []
                    for bfile in tocheck:
                        standin = bfutil.standin(bfile)
                        if inctx(standin, ctx1):
                            if ctx1[standin].data().strip() != \
                                    bfutil.hashfile(self.wjoin(bfile)):
                                modified.append(bfile)
                            else:
                                clean.append(bfile)
                        else:
                            added.append(bfile)
            finally:
                wlock.release()

            # bfiles present in ctx1 but absent from the bfile dirstate
            # were removed.
            for standin in ctx1.manifest():
                if not bfutil.is_standin(standin):
                    continue
                bfile = bfutil.split_standin(standin)
                if not match(bfile):
                    continue
                if bfile not in bfdirstate:
                    removed.append(bfile)

            # Handle unknown and ignored differently
            bfiles = (modified, added, removed, missing, [], [], clean)
            result = list(result)
            # Unknown files
            result[4] = [f for f in unknown if repo.dirstate[f] == '?' and \
                not bfutil.is_standin(f)]
            # Ignored files must be ignored by both the dirstate and
            # bfdirstate
            result[5] = set(ignored).intersection(set(result[5]))
            # combine normal files and bfiles
            normals = [[fn for fn in filelist if not bfutil.is_standin(fn)]
                for filelist in result]
            result = [sorted(list1 + list2) for (list1, list2) in
                zip(normals, bfiles)]
        else:
            # Not a working-dir status: just map standin names back to
            # their bfile names.
            def toname(f):
                if bfutil.is_standin(f):
                    return bfutil.split_standin(f)
                return f
            result = [[toname(f) for f in items] for items in result]

        if not listunknown:
            result[4] = []
        if not listignored:
            result[5] = []
        if not listclean:
            result[6] = []
        self.bfstatus = True
        return result
def tostandin(file):
    """Return the standin name for 'file' when that standin exists in ctx2;
    otherwise return the name unchanged."""
    standin = bfutil.standin(file)
    return standin if inctx(standin, ctx2) else file
def commit(self, text="", user=None, date=None, match=None, force=False, editor=False, extra={}): orig = super(bfiles_repo, self).commit wlock = repo.wlock() try: # Case 1: user calls commit with no specific files or # include/exclude patterns: refresh and commit everything. if (match is None) or (not match.anypats() and not match.files()): bfiles = bfutil.list_bfiles(self) bfdirstate = bfutil.open_bfdirstate(ui, self) # this only loops through bfiles that exist (not removed/renamed) for bfile in bfiles: if os.path.exists(self.wjoin(bfutil.standin(bfile))): bfutil.update_standin(self, bfutil.standin(bfile)) bfdirstate.normal(bfutil.unixpath(bfile)) for bfile in bfdirstate: if not os.path.exists(repo.wjoin(bfutil.standin(bfile))): bfdirstate.forget(bfutil.unixpath(bfile)) bfdirstate.write() return orig(text=text, user=user, date=date, match=match, force=force, editor=editor, extra=extra) for file in match.files(): if bfutil.is_standin(file): raise util.Abort("Don't commit bfile standin. Commit bfile.") # Case 2: user calls commit with specified patterns: refresh any # matching big files. smatcher = bfutil.compose_standin_matcher(self, match) standins = bfutil.dirstate_walk(self.dirstate, smatcher) # No matching big files: get out of the way and pass control to # the usual commit() method. if not standins: return orig(text=text, user=user, date=date, match=match, force=force, editor=editor, extra=extra) # Refresh all matching big files. It's possible that the commit # will end up failing, in which case the big files will stay # refreshed. No harm done: the user modified them and asked to # commit them, so sooner or later we're going to refresh the # standins. Might as well leave them refreshed. 
bfdirstate = bfutil.open_bfdirstate(ui, self) for standin in standins: bfile = bfutil.split_standin(standin) if bfdirstate[bfile] is not 'r': bfutil.update_standin(self, standin) bfdirstate.normal(bfutil.unixpath(bfile)) else: bfdirstate.forget(bfutil.unixpath(bfile)) bfdirstate.write() # Cook up a new matcher that only matches regular files or # standins corresponding to the big files requested by the user. # Have to modify _files to prevent commit() from complaining # "not tracked" for big files. bfiles = bfutil.list_bfiles(repo) match = copy.copy(match) orig_matchfn = match.matchfn # Check both the list of bfiles and the list of standins because if a bfile was removed, it # won't be in the list of bfiles at this point match._files += sorted(standins) actualfiles = [] for f in match._files: fstandin = bfutil.standin(f) # Ignore known bfiles and standins if f in bfiles or fstandin in standins: continue # Append directory separator to avoid collisions if not fstandin.endswith(os.sep): fstandin += os.sep # Prevalidate matching standin directories if any(st for st in match._files if st.startswith(fstandin)): continue actualfiles.append(f) match._files = actualfiles def matchfn(f): if orig_matchfn(f): return f not in bfiles else: return f in standins match.matchfn = matchfn return orig(text=text, user=user, date=date, match=match, force=force, editor=editor, extra=extra) finally: wlock.release()
def override_add(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add': files matching --bf, the configured size threshold or
    the configured patterns are added as bfiles (standins written and
    tracked); everything else falls through to the normal add.

    Returns 1 when either the wrapped add failed or any bfile add failed,
    else 0.
    """
    bf = opts.pop('bf', None)

    bfsize = opts.pop('bfsize', None)
    if bfsize:
        try:
            bfsize = int(bfsize)
        except ValueError:
            raise util.Abort(_('size must be an integer, was %s\n') % bfsize)
    else:
        # No explicit size: fall back to the configured threshold (MB),
        # defaulting to 10, but only in a bfiles-enabled repo.
        if os.path.exists(repo.wjoin(bfutil.short_name)):
            bfsize = ui.config(bfutil.long_name, 'size', default='10')
            if bfsize:
                try:
                    bfsize = int(bfsize)
                except ValueError:
                    raise util.Abort(_('bfiles.size must be integer, was %s\n')
                        % bfsize)

    bfmatcher = None
    if os.path.exists(repo.wjoin(bfutil.short_name)):
        bfpats = ui.config(bfutil.long_name, 'patterns', default=())
        if bfpats:
            bfpats = bfpats.split(' ')
            bfmatcher = match_.match(repo.root, '', list(bfpats))

    bfnames = []
    m = cmdutil.match(repo, pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        bfile = bfutil.standin(f) in wctx
        nfile = f in wctx

        if exact and bfile:
            ui.warn(_('%s already a bfile\n') % f)
            continue
        # Don't warn the user when they attempt to add a normal tracked
        # file. The normal add code will do that for us.
        if exact and nfile:
            continue

        if exact or (not bfile and not nfile):
            if bf or (bfsize and os.path.getsize(repo.wjoin(f)) >= \
                    bfsize * 1024 * 1024) or (bfmatcher and bfmatcher(f)):
                bfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as bfile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock otherwise there could be a race condition inbetween when
    # standins are created and added to the repo
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            bfdirstate = bfutil.open_bfdirstate(ui, repo)
            for f in bfnames:
                standinname = bfutil.standin(f)
                bfutil.write_standin(repo, standinname, hash='',
                    executable=bfutil.get_executable(repo.wjoin(f)))
                standins.append(standinname)
                # A previously-removed bfile being re-added goes back to
                # normallookup, otherwise it is a fresh add.
                if bfdirstate[bfutil.unixpath(f)] == 'r':
                    bfdirstate.normallookup(bfutil.unixpath(f))
                else:
                    bfdirstate.add(bfutil.unixpath(f))
            bfdirstate.write()
            bad += [bfutil.split_standin(f) for f in
                bfutil.repo_add(repo, standins) if f in m.files()]
    finally:
        wlock.release()

    oldmatch = cmdutil.match
    manifest = repo[None].manifest()

    def override_match(repo, pats=[], opts={}, globbed=False,
            default='relpath'):
        # Hide bfiles/standins from the wrapped add.
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in manifest
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None
        return m

    # Fix: restore cmdutil.match even if orig raises (was unprotected).
    cmdutil.match = override_match
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch
    # Fix: the original used `result is 1` — identity comparison against an
    # int literal; use equality.
    return (result == 1 or bad) and 1 or 0
def tostandin(f):
    """Map f to its standin name when the standin is tracked in the working
    directory or present in ctx; otherwise return f unchanged."""
    standin = bfutil.standin(f)
    if standin in repo[None] or standin in ctx:
        return standin
    return f
def override_remove(orig, ui, repo, *pats, **opts):
    """Wrap 'hg remove': run the normal remove on non-bfiles, then apply
    the same remove/forget semantics (--after/--force warnings included)
    to matching bfiles and their standins.
    """
    wctx = repo[None].manifest()
    oldmatch = cmdutil.match

    def override_match(repo, pats=[], opts={}, globbed=False,
            default='relpath'):
        # Restrict the matcher to non-bfiles so orig only touches those.
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in wctx
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)
        return m

    # Fix: restore the monkeypatched cmdutil.match even if orig raises;
    # previously an exception left cmdutil.match permanently patched.
    cmdutil.match = override_match
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    try:
        repo.bfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.bfstatus = False
    # Restrict each status bucket to tracked bfiles. ('filelist' instead of
    # the original 'list', which shadowed the builtin.)
    modified, added, deleted, clean = [[f for f in filelist
        if bfutil.standin(f) in wctx]
        for filelist in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                % (m.rel(f), reason))

    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for f in remove:
            if not after:
                os.unlink(repo.wjoin(f))
                # Prune now-empty parent directories.
                currentdir = os.path.split(f)[0]
                while currentdir and not os.listdir(repo.wjoin(currentdir)):
                    os.rmdir(repo.wjoin(currentdir))
                    currentdir = os.path.split(currentdir)[0]
            bfdirstate.remove(bfutil.unixpath(f))
        bfdirstate.write()

        forget = [bfutil.standin(f) for f in forget]
        remove = [bfutil.standin(f) for f in remove]
        bfutil.repo_forget(repo, forget)
        bfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()
def revert_bfiles(ui, repo):
    """Restore all bfile contents/modes to match their standins after a
    revert, fetching missing content from the store, and reconcile the
    bfile dirstate with the repository dirstate of the standins.
    """
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
            False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
            clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []        # (bfile, hash) pairs that must come from the store
        at = 0
        updated = 0
        for bfile in bfiles:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                # Standin gone: the revert removed this bfile.
                bfdirstate.remove(bfile)
                continue
            # revert saved a .orig standin; mirror that for the bfile.
            if os.path.exists(
                    repo.wjoin(bfutil.standin(os.path.join(bfile +
                    '.orig')))):
                shutil.copyfile(repo.wjoin(bfile), repo.wjoin(bfile +
                    '.orig'))
            at += 1
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                # Content missing/stale: restore from cache or queue a
                # store fetch.
                path = bfutil.find_file(repo, expectedhash)
                if path is None:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    # Classify the dirstate entry against the parent rev.
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == repo['.'][bfutil.standin(
                            bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                            bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Only the mode drifted.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                if bfutil.standin(bfile) not in repo['.']:
                    bfdirstate.add(bfutil.unixpath(bfile))
                elif expectedhash == repo['.'][bfutil.standin(
                        bfile)].data().strip():
                    bfdirstate.normal(bfutil.unixpath(bfile))
                else:
                    bfutil.dirstate_normaldirty(bfdirstate,
                        bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        # Fetched files: apply the standin's mode and classify the entry.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                    bfutil.unixpath(filename))

        removed = 0
        # Second pass: reconcile bfile dirstate with the standins' dirstate.
        for bfile in bfdirstate:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                if os.path.exists(repo.wjoin(bfile)):
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                if bfutil.standin(bfile) in repo['.']:
                    bfdirstate.remove(bfutil.unixpath(bfile))
                else:
                    bfdirstate.forget(bfutil.unixpath(bfile))
            else:
                # Mirror the standin's dirstate state onto the bfile.
                state = repo.dirstate[bfutil.standin(bfile)]
                if state == 'n':
                    bfdirstate.normal(bfile)
                elif state == 'r':
                    bfdirstate.remove(bfile)
                elif state == 'a':
                    bfdirstate.add(bfile)
                elif state == '?':
                    bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()
def update_bfiles(ui, repo):
    """Bring working-copy bfiles in sync with their standins: copy cached
    content in, fetch missing hashes from the store, fix modes, and drop
    bfiles whose standins are gone.
    """
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
            False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
            clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []        # (bfile, hash) pairs that must come from the store
        at = 0
        updated = 0
        removed = 0
        printed = False
        if bfiles:
            ui.status(_('Getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            # Standin gone but bfile present: the bfile was removed.
            if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(
                    repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                # Content missing/stale: restore from cache or queue a
                # store fetch.
                path = bfutil.find_file(repo, expectedhash)
                if not path:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Content is fine but the mode drifted from the standin's.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        # Fetched files still need the standin's mode applied.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        # Purge dirstate entries for bfiles no longer tracked.
        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('Getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    bfdirstate.forget(bfutil.unixpath(bfile))
        bfdirstate.write()
        if printed:
            ui.status(
                _('%d big files updated, %d removed\n') % (updated, removed))
    finally:
        wlock.release()
def makestandin(relpath):
    """Return the working-copy path of the standin for relpath, expressed
    relative to the current directory."""
    cwd = repo.getcwd()
    canonical = util.canonpath(repo.root, cwd, relpath)
    return os.path.join(os.path.relpath('.', cwd),
                        bfutil.standin(canonical))