def update_hg_bookmarks(self, refs):
    """Mirror git 'refs/heads/*' refs into Mercurial bookmarks.

    refs: mapping of full git ref name -> git sha, as received from the
    server (it may include refs we are not pulling).  For each head whose
    object is present locally, create the bookmark, or move it when the
    existing bookmark is an ancestor of the new target (fast forward).
    Supports both the old bookmarks API (bookmarks.parse/write(repo, bms))
    and the newer repo._bookmarks store.
    """
    try:
        # bookmarks.parse only exists on older Mercurial; its presence
        # selects which read/write API is used below.
        oldbm = getattr(bookmarks, 'parse', None)
        if oldbm:
            bms = bookmarks.parse(self.repo)
        else:
            bms = self.repo._bookmarks
        # len('refs/heads/') == 11, so ref[11:] is the bare head name
        heads = dict([(ref[11:],refs[ref]) for ref in refs
                      if ref.startswith('refs/heads/')])
        for head, sha in heads.iteritems():
            # refs contains all the refs in the server, not just
            # the ones we are pulling
            if sha not in self.git.object_store:
                continue
            hgsha = bin(self.map_hg_get(sha))
            if not head in bms:
                # new branch
                bms[head] = hgsha
            else:
                bm = self.repo[bms[head]]
                if bm.ancestor(self.repo[hgsha]) == bm:
                    # fast forward
                    bms[head] = hgsha
        if heads:
            if oldbm:
                bookmarks.write(self.repo, bms)
            else:
                self.repo._bookmarks = bms
                bookmarks.write(self.repo)
    except AttributeError:
        # NOTE(review): presumably raised when the bookmark store/API is
        # unavailable on this Mercurial version -- warn instead of abort
        self.ui.warn(_('creating bookmarks failed, do you have'
                       ' bookmarks enabled?\n'))
def updatebookmarks(repo, peer):
    """Copy every bookmark from *peer* into the local repository,
    overwriting same-named local bookmarks.

    No-op when the peer reports no bookmarks.  On Mercurial >= 3.6 the
    bookmark store must be persisted through a transaction
    (recordchange); older versions write the store directly.
    """
    remotemarks = peer.listkeys('bookmarks')
    localmarks = repo._bookmarks
    if not remotemarks:
        return
    for k, v in remotemarks.iteritems():
        # listkeys yields hex node strings; the store holds binary nodes
        localmarks[k] = hgbin(v)
    if check_version(3, 6):
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('bookmark')
            localmarks.recordchange(tr)
            tr.close()
        finally:
            # release in reverse order of acquisition
            if tr is not None:
                tr.release()
            if lock is not None:
                lock.release()
    else:
        if hasattr(localmarks, 'write'):
            localmarks.write()
        else:
            # very old Mercurial: module-level writer
            bookmarks.write(repo)
def putbookmarks(self, updatedbookmark):
    """Persist converted bookmarks into the destination repository.

    updatedbookmark: mapping of bookmark name -> hex changeset id.
    Does nothing when the mapping is empty.
    """
    # Idiomatic emptiness test instead of `if not len(...)`.
    if not updatedbookmark:
        return
    self.ui.status(_("updating bookmarks\n"))
    # Iterate items directly instead of re-indexing the dict per key.
    for name, hexnode in updatedbookmark.iteritems():
        self.repo._bookmarks[name] = bin(hexnode)
    bookmarks.write(self.repo)
def updatebookmarks(repo, nstate, originalbookmarks, **opts):
    """Move bookmarks to their correct changesets."""
    for mark, oldnode in originalbookmarks.iteritems():
        # only repoint bookmarks whose rev moved to a real changeset
        if oldnode in nstate and nstate[oldnode] != nullmerge:
            repo._bookmarks[mark] = nstate[oldnode]
    bookmarks.write(repo)
def update_hg_bookmarks(self, refs):
    """Mirror git 'refs/heads/*' refs into Mercurial bookmarks.

    refs: mapping of full git ref name -> git sha (may contain refs we
    are not pulling).  Creates or fast-forwards one bookmark per head
    whose object is present locally.  When branch_bookmark_suffix is
    set, bookmark names colliding with named-branch names get the
    suffix appended before writing.  Handles the old bookmarks API
    (parse/write(repo, bms)) as well as the hg >= 2.5 bms.write() one.
    """
    try:
        # bookmarks.parse only exists on older Mercurial; its presence
        # selects which read/write API is used below.
        oldbm = getattr(bookmarks, 'parse', None)
        if oldbm:
            bms = bookmarks.parse(self.repo)
        else:
            bms = self.repo._bookmarks
        # len('refs/heads/') == 11, so ref[11:] is the bare head name
        heads = dict([(ref[11:],refs[ref]) for ref in refs
                      if ref.startswith('refs/heads/')])
        for head, sha in heads.iteritems():
            # refs contains all the refs in the server, not just
            # the ones we are pulling
            if sha not in self.git.object_store:
                continue
            hgsha = bin(self.map_hg_get(sha))
            if not head in bms:
                # new branch
                bms[head] = hgsha
            else:
                bm = self.repo[bms[head]]
                if bm.ancestor(self.repo[hgsha]) == bm:
                    # fast forward
                    bms[head] = hgsha
        # if there's a branch bookmark suffix,
        # then add it on to all bookmark names
        # that would otherwise conflict with a branch
        # name
        if self.branch_bookmark_suffix:
            real_branch_names = self.repo.branchmap()
            # NOTE(review): this rebuilds bms as a plain dict, so the
            # bms.write() branch below cannot trigger afterwards
            bms = dict(
                (bm_name + self.branch_bookmark_suffix
                 if bm_name in real_branch_names
                 else bm_name,
                 bms[bm_name])
                for bm_name in bms
            )
        if heads:
            if oldbm:
                bookmarks.write(self.repo, bms)
            else:
                self.repo._bookmarks = bms
                if getattr(bms, 'write', None):
                    # hg >= 2.5
                    bms.write()
                else:
                    # hg < 2.5
                    bookmarks.write(self.repo)
    except AttributeError:
        # NOTE(review): presumably raised when the bookmark store/API is
        # unavailable on this Mercurial version -- warn instead of abort
        self.ui.warn(_('creating bookmarks failed, do you have'
                       ' bookmarks enabled?\n'))
def updatebookmarks(repo, nstate, originalbookmarks, **opts):
    """Move bookmarks to their correct changesets."""
    active = repo._bookmarkcurrent
    for mark, oldnode in originalbookmarks.iteritems():
        if oldnode not in nstate:
            continue
        newnode = nstate[oldnode]
        # skip sentinel targets and never touch the active bookmark
        if newnode != nullmerge and mark != active:
            repo._bookmarks[mark] = newnode
    bookmarks.write(repo)
def kclone(ui, source, bookmark, dest=None):
    """Clone the source repo at the specified bookmark."""
    cloned = hg.clone(ui, peeropts={}, source=source, dest=dest,
                      rev=[bookmark])
    if cloned is None:
        return 1
    srcrepo, destrepo = cloned
    # Drop every bookmark the clone brought along; only the tracked one
    # matters here, and it is recorded separately below.
    destrepo._bookmarks.clear()
    bookmarks.write(destrepo)
    # Remember which bookmark this clone follows for later use.
    _set_bookmark(destrepo, bookmark)
def updatebookmarks(repo, peer):
    """Copy the peer's bookmarks into the local repository, overwriting
    same-named local bookmarks."""
    remotemarks = peer.listkeys('bookmarks')
    localmarks = repo._bookmarks
    if not remotemarks:
        return
    for name, hexnode in remotemarks.iteritems():
        localmarks[name] = hgbin(hexnode)
    # Newer Mercurial exposes write() on the bookmark store itself;
    # older versions persist through the bookmarks module.
    writer = getattr(localmarks, 'write', None)
    if writer is not None:
        writer()
    else:
        bookmarks.write(repo)
def update_hg_bookmarks(self, refs):
    """Mirror git 'refs/heads/*' refs into Mercurial bookmarks.

    refs: mapping of full git ref name -> git sha (may contain refs we
    are not pulling).  Creates or fast-forwards one bookmark per head
    whose object is present locally; when branch_bookmark_suffix is
    set, bookmark names colliding with named-branch names get the
    suffix appended before writing.
    """
    try:
        # bookmarks.parse only exists on older Mercurial; its presence
        # selects which read/write API is used below.
        oldbm = getattr(bookmarks, 'parse', None)
        if oldbm:
            bms = bookmarks.parse(self.repo)
        else:
            bms = self.repo._bookmarks
        # len('refs/heads/') == 11, so ref[11:] is the bare head name
        heads = dict([(ref[11:], refs[ref]) for ref in refs
                      if ref.startswith('refs/heads/')])
        for head, sha in heads.iteritems():
            # refs contains all the refs in the server, not just
            # the ones we are pulling
            if sha not in self.git.object_store:
                continue
            hgsha = bin(self.map_hg_get(sha))
            if not head in bms:
                # new branch
                bms[head] = hgsha
            else:
                bm = self.repo[bms[head]]
                if bm.ancestor(self.repo[hgsha]) == bm:
                    # fast forward
                    bms[head] = hgsha
        # if there's a branch bookmark suffix,
        # then add it on to all bookmark names
        # that would otherwise conflict with a branch
        # name
        if self.branch_bookmark_suffix:
            real_branch_names = self.repo.branchmap()
            bms = dict((bm_name + self.branch_bookmark_suffix
                        if bm_name in real_branch_names
                        else bm_name,
                        bms[bm_name])
                       for bm_name in bms)
        if heads:
            if oldbm:
                bookmarks.write(self.repo, bms)
            else:
                self.repo._bookmarks = bms
                bookmarks.write(self.repo)
    except AttributeError:
        # NOTE(review): presumably raised when the bookmark store/API is
        # unavailable on this Mercurial version -- warn instead of abort
        self.ui.warn(
            _('creating bookmarks failed, do you have'
              ' bookmarks enabled?\n'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit the changesets between <parent> and the working
    directory parent.  Three entry modes, selected via opts:
      --continue  resume a previously interrupted edit from saved state
      --abort     restore the old tip and strip everything created so far
      (default)   build/prompt for a rule list and start a new edit
    With --outgoing, <parent> names a repository and the first outgoing
    changeset becomes the edit base instead.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))
    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]
        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(repo, other, [],
                                                           force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(common,
                                                          outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        # reload the edit session persisted by writestate()
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # walk down the interim commits, classifying them for the
        # pending rule (fold keeps them temporary, otherwise created)
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_,
             tmpnodes_, ) = finishfold(ui, repo, parentctx, oldctx, new,
                                       opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            # strip in reverse order so descendants go before ancestors
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()
        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)
        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)
        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        replacemap = {}
        tmpnodes = []
        created = []

    # main loop: persist state before each rule so --continue/--abort
    # can recover, then dispatch the rule's action
    while rules:
        writestate(repo, parentctx.node(), created, replaced, tmpnodes,
                   existing, rules, keep, tip, replacemap)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_,
         tmpnodes_, ) = actiontable[action](ui, repo, parentctx, ha, opts)
        hexshort = lambda x: node.hex(x)[:12]
        if replaced_:
            clen, rlen = len(created_), len(replaced_)
            if clen == rlen == 1:
                ui.debug('histedit: exact replacement of %s with %s\n' %
                         (hexshort(replaced_[0]), hexshort(created_[0])))
                replacemap[replaced_[0]] = created_[0]
            elif clen > rlen:
                assert rlen == 1, ('unexpected replacement of '
                                   '%d changes with %d changes' %
                                   (rlen, clen))
                # made more changesets than we're replacing
                # TODO synthesize patch names for created patches
                replacemap[replaced_[0]] = created_[-1]
                ui.debug('histedit: created many, assuming %s replaced by %s' %
                         (hexshort(replaced_[0]), hexshort(created_[-1])))
            elif rlen > clen:
                if not created_:
                    # This must be a drop. Try and put our metadata on
                    # the parent change.
                    assert rlen == 1
                    r = replaced_[0]
                    ui.debug('histedit: %s seems replaced with nothing, '
                             'finding a parent\n' % (hexshort(r)))
                    pctx = repo[r].parents()[0]
                    if pctx.node() in replacemap:
                        ui.debug('histedit: parent is already replaced\n')
                        replacemap[r] = replacemap[pctx.node()]
                    else:
                        replacemap[r] = pctx.node()
                    ui.debug('histedit: %s best replaced by %s\n' %
                             (hexshort(r), hexshort(replacemap[r])))
                else:
                    assert len(created_) == 1
                    for r in replaced_:
                        ui.debug('histedit: %s replaced by %s\n' %
                                 (hexshort(r), hexshort(created_[0])))
                        replacemap[r] = created_[0]
            else:
                assert False, ('Unhandled case in replacement mapping! '
                               'replacing %d changes with %d changes' %
                               (rlen, clen))
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        if replacemap:
            ui.note('histedit: Should update metadata for the following '
                    'changes:\n')
            for old, new in replacemap.iteritems():
                if old in tmpnodes or old in created:
                    # can't have any metadata we'd want to update
                    continue
                # follow replacement chains to the final surviving node
                while new in replacemap:
                    new = replacemap[new]
                ui.note('histedit: %s to %s\n' % (hexshort(old),
                                                  hexshort(new)))
                octx = repo[old]
                if bookmarks is not None:
                    marks = octx.bookmarks()
                    if marks:
                        ui.note('histedit: moving bookmarks %s\n' %
                                ', '.join(marks))
                        for mark in marks:
                            repo._bookmarks[mark] = new
                        bookmarks.write(repo)
            # TODO update mq state
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced, key=lambda x: repo[x].rev()):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass
    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def strip(ui, repo, nodelist, backup="all"):
    """Strip changesets (and their descendants) from the repository,
    bundling and re-adding any unrelated descendants so that only the
    requested revisions are lost.

    nodelist: a node, or list of nodes, to strip along with descendants.
    backup: "all" writes a full backup bundle first; "strip" keeps the
    temporary bundle of preserved revisions on disk afterwards.
    """
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)
    keeppartialbundle = backup == 'strip'
    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants(rev):
            tostrip.add(desc)
    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)
    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]
    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(*saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]
    # bookmarks pointing into the stripped range are collected so they
    # can be repointed at '.' once the strip has succeeded
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)
    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        backupfile = _bundle(repo, stripbases, cl.heads(), node, 'backup')
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)
    mfst = repo.manifest
    tr = repo.transaction("strip")
    offset = len(tr.entries)
    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()
        try:
            # truncate every file touched by the transaction back to its
            # pre-strip length, then commit the transaction
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except:
            tr.abort()
            raise
        if saveheads or savebases:
            # restore the unrelated revisions we bundled away above
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)
        for m in updatebm:
            bm[m] = repo['.'].node()
        bookmarks.write(repo)
    except:
        # tell the user where a bundle that can restore the stripped
        # revisions lives, then re-raise
        if backupfile:
            ui.warn(
                _("strip failed, full bundle stored in '%s'\n")
                % backupfile)
        elif saveheads:
            ui.warn(
                _("strip failed, partial bundle stored in '%s'\n")
                % chgrpfile)
        raise
    repo.destroyed()
    # remove potential unknown phase
    # XXX using to_strip data would be faster
    phases.filterunknown(repo)
def strip(ui, repo, nodelist, backup="all", topic='backup'): cl = repo.changelog # TODO handle undo of merge sets if isinstance(nodelist, str): nodelist = [nodelist] striplist = [cl.rev(node) for node in nodelist] striprev = min(striplist) keeppartialbundle = backup == 'strip' # Some revisions with rev > striprev may not be descendants of striprev. # We have to find these revisions and put them in a bundle, so that # we can restore them after the truncations. # To create the bundle we use repo.changegroupsubset which requires # the list of heads and bases of the set of interesting revisions. # (head = revision in the set that has no descendant in the set; # base = revision in the set that has no ancestor in the set) tostrip = set(striplist) for rev in striplist: for desc in cl.descendants(rev): tostrip.add(desc) files = _collectfiles(repo, striprev) saverevs = _collectbrokencsets(repo, files, striprev) # compute heads saveheads = set(saverevs) for r in xrange(striprev + 1, len(cl)): if r not in tostrip: saverevs.add(r) saveheads.difference_update(cl.parentrevs(r)) saveheads.add(r) saveheads = [cl.node(r) for r in saveheads] # compute base nodes if saverevs: descendants = set(cl.descendants(*saverevs)) saverevs.difference_update(descendants) savebases = [cl.node(r) for r in saverevs] stripbases = [cl.node(r) for r in tostrip] bm = repo._bookmarks updatebm = [] for m in bm: rev = repo[bm[m]].rev() if rev in tostrip: updatebm.append(m) # create a changegroup for all the branches we need to keep backupfile = None if backup == "all": backupfile = _bundle(repo, stripbases, cl.heads(), node, topic) repo.ui.status(_("saved backup bundle to %s\n") % backupfile) if saveheads or savebases: # do not compress partial bundle if we remove it from disk later chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp', compress=keeppartialbundle) mfst = repo.manifest tr = repo.transaction("strip") offset = len(tr.entries) try: tr.startgroup() cl.strip(striprev, tr) 
mfst.strip(striprev, tr) for fn in files: repo.file(fn).strip(striprev, tr) tr.endgroup() try: for i in xrange(offset, len(tr.entries)): file, troffset, ignore = tr.entries[i] repo.sopener(file, 'a').truncate(troffset) tr.close() except: tr.abort() raise if saveheads or savebases: ui.note(_("adding branch\n")) f = open(chgrpfile, "rb") gen = changegroup.readbundle(f, chgrpfile) if not repo.ui.verbose: # silence internal shuffling chatter repo.ui.pushbuffer() repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True) if not repo.ui.verbose: repo.ui.popbuffer() f.close() if not keeppartialbundle: os.unlink(chgrpfile) # remove undo files for undofile in repo.undofiles(): try: os.unlink(undofile) except OSError, e: if e.errno != errno.ENOENT: ui.warn(_('error removing %s: %s\n') % (undofile, str(e))) for m in updatebm: bm[m] = repo['.'].node() bookmarks.write(repo)
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit the changesets between <parent> and the working
    directory parent.  Three entry modes, selected via opts:
      --continue  resume a previously interrupted edit from saved state
      --abort     restore the old tip and strip everything created so far
      (default)   build/prompt for a rule list and start a new edit
    With --outgoing, <parent> names a repository and the first outgoing
    changeset becomes the edit base instead.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))
    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]
        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(common,
                                                          outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        # reload the edit session persisted by writestate()
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # walk down the interim commits, classifying them for the
        # pending rule (fold keeps them temporary, otherwise created)
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_,
             tmpnodes_, ) = finishfold(ui, repo, parentctx, oldctx, new,
                                       opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            # strip in reverse order so descendants go before ancestors
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()
        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)
        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)
        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        replacemap = {}
        tmpnodes = []
        created = []

    # main loop: persist state before each rule so --continue/--abort
    # can recover, then dispatch the rule's action
    while rules:
        writestate(repo, parentctx.node(), created, replaced, tmpnodes,
                   existing, rules, keep, tip, replacemap)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_,
         tmpnodes_, ) = actiontable[action](ui, repo, parentctx, ha, opts)
        hexshort = lambda x: node.hex(x)[:12]
        if replaced_:
            clen, rlen = len(created_), len(replaced_)
            if clen == rlen == 1:
                ui.debug('histedit: exact replacement of %s with %s\n' % (
                    hexshort(replaced_[0]), hexshort(created_[0])))
                replacemap[replaced_[0]] = created_[0]
            elif clen > rlen:
                assert rlen == 1, ('unexpected replacement of '
                                   '%d changes with %d changes' %
                                   (rlen, clen))
                # made more changesets than we're replacing
                # TODO synthesize patch names for created patches
                replacemap[replaced_[0]] = created_[-1]
                ui.debug('histedit: created many, assuming %s replaced by %s' % (
                    hexshort(replaced_[0]), hexshort(created_[-1])))
            elif rlen > clen:
                if not created_:
                    # This must be a drop. Try and put our metadata on
                    # the parent change.
                    assert rlen == 1
                    r = replaced_[0]
                    ui.debug('histedit: %s seems replaced with nothing, '
                             'finding a parent\n' % (hexshort(r)))
                    pctx = repo[r].parents()[0]
                    if pctx.node() in replacemap:
                        ui.debug('histedit: parent is already replaced\n')
                        replacemap[r] = replacemap[pctx.node()]
                    else:
                        replacemap[r] = pctx.node()
                    ui.debug('histedit: %s best replaced by %s\n' % (
                        hexshort(r), hexshort(replacemap[r])))
                else:
                    assert len(created_) == 1
                    for r in replaced_:
                        ui.debug('histedit: %s replaced by %s\n' % (
                            hexshort(r), hexshort(created_[0])))
                        replacemap[r] = created_[0]
            else:
                assert False, (
                    'Unhandled case in replacement mapping! '
                    'replacing %d changes with %d changes' % (rlen, clen))
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        if replacemap:
            ui.note('histedit: Should update metadata for the following '
                    'changes:\n')
            for old, new in replacemap.iteritems():
                if old in tmpnodes or old in created:
                    # can't have any metadata we'd want to update
                    continue
                # follow replacement chains to the final surviving node
                while new in replacemap:
                    new = replacemap[new]
                ui.note('histedit: %s to %s\n' % (hexshort(old),
                                                  hexshort(new)))
                octx = repo[old]
                if bookmarks is not None:
                    marks = octx.bookmarks()
                    if marks:
                        ui.note('histedit: moving bookmarks %s\n' %
                                ', '.join(marks))
                        for mark in marks:
                            repo._bookmarks[mark] = new
                        bookmarks.write(repo)
            # TODO update mq state
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced, key=lambda x: repo[x].rev()):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass
    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))