Example 1
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates the bundle that will be sent to the user

    returns a list of bundle2 parts
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
Example 2
 def applybundle(self):
     fp = self.opener()
     try:
         gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
         changegroup.addchangegroup(self.repo, gen, 'unshelve',
                                    'bundle:' + self.vfs.join(self.fname))
     finally:
         fp.close()
Example 3
def bundle_to_json(fh):
    """
    Convert the received HG10xx data stream (a mercurial 1.0 bundle created using hg push from the
    command line) to a json object.
    """
    # See http://www.wstein.org/home/wstein/www/home/was/patches/hg_json
    hg_unbundle10_obj = readbundle(get_configured_ui(), fh, None)
    groups = [group for group in unpack_groups(hg_unbundle10_obj)]
    return json.dumps(groups, indent=4)
Example 4
 def applybundle(self):
     fp = self.opener()
     try:
         gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
         bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
                             source='unshelve',
                             url='bundle:' + self.vfs.join(self.fname),
                             targetphase=phases.secret)
     finally:
         fp.close()
Example 5
        def clone(self, remote, heads=[], stream=False):
            supported = True
            if not remote.capable('bundles'):
                supported = False
                self.ui.debug(_('bundle clone not supported\n'))
            elif heads:
                supported = False
                self.ui.debug(_('cannot perform bundle clone if heads requested\n'))

            if not supported:
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            result = remote._call('bundles')

            if not result:
                self.ui.note(_('no bundles available; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            # Eventually we'll support choosing the best options. Until then,
            # use the first entry.
            entry = result.splitlines()[0]
            fields = entry.split()
            url = fields[0]

            if not url:
                self.ui.note(_('invalid bundle manifest; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            self.ui.status(_('downloading bundle %s\n' % url))

            try:
                fh = hgurl.open(self.ui, url)
                cg = exchange.readbundle(self.ui, fh, 'stream')

                changegroup.addchangegroup(self, cg, 'bundleclone', url)

                self.ui.status(_('finishing applying bundle; pulling\n'))
                return exchange.pull(self, remote, heads=heads)

            except urllib2.HTTPError as e:
                self.ui.warn(_('HTTP error fetching bundle; using normal clone: %s\n') % str(e))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)
            # This typically means a connectivity, DNS, etc problem.
            except urllib2.URLError as e:
                self.ui.warn(_('error fetching bundle; using normal clone: %s\n') % e.reason)
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)
Example 6
def handlechangegroup_widen(op, inpart):
    """Changegroup exchange handler which restores temporarily-stripped nodes"""
    # We saved a bundle with stripped node data we must now restore.
    # This approach is based on mercurial/repair.py@6ee26a53c111.
    repo = op.repo
    ui = op.ui

    chgrpfile = op._widen_bundle
    del op._widen_bundle
    vfs = repo.vfs

    ui.note(_(b"adding branch\n"))
    f = vfs.open(chgrpfile, b"rb")
    try:
        gen = exchange.readbundle(ui, f, chgrpfile, vfs)
        # silence internal shuffling chatter
        override = {(b'ui', b'quiet'): True}
        if ui.verbose:
            override = {}
        with ui.configoverride(override):
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction(b'strip') as tr:
                    bundle2.processbundle(repo, gen, lambda: tr)
            else:
                gen.apply(
                    repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True
                )
    finally:
        f.close()

    # remove undo files
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                ui.warn(
                    _(b'error removing %s: %s\n')
                    % (undovfs.join(undofile), stringutil.forcebytestr(e))
                )

    # Remove partial backup only if there were no exceptions
    op._widen_uninterr.__exit__(None, None, None)
    vfs.unlink(chgrpfile)
Example 7
 def applybundle(self):
     fp = self.opener()
     try:
         targetphase = phases.internal
         if not phases.supportinternal(self.repo):
             targetphase = phases.secret
         gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
         pretip = self.repo['tip']
         tr = self.repo.currenttransaction()
         bundle2.applybundle(self.repo,
                             gen,
                             tr,
                             source='unshelve',
                             url='bundle:' + self.vfs.join(self.fname),
                             targetphase=targetphase)
         shelvectx = self.repo['tip']
         if pretip == shelvectx:
             shelverev = tr.changes['revduplicates'][-1]
             shelvectx = self.repo[shelverev]
         return shelvectx
     finally:
         fp.close()
Example 8
def backups(ui, repo, *pats, **opts):
    '''lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    '''
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
    if supportsmarkers and ui.configbool('backups', 'warnobsolescence', True):
        # Warn users of obsolescence markers that they probably don't want to
        # use backups but reflog instead
        ui.warn(msgwithcreatermarkers)
    backuppath = repo.vfs.join("strip-backup")
    backups = filter(os.path.isfile, glob.glob(backuppath + "/*.hg"))
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts['bundle'] = ''
    opts['force'] = None

    if util.safehasattr(cmdutil, 'loglimit'):
        # legacy case
        loglimit = cmdutil.loglimit
        show_changeset = cmdutil.show_changeset
    else:
        # since core commit c8e2d6ed1f9e
        from mercurial import logcmdutil
        loglimit = logcmdutil.getlimit
        show_changeset = logcmdutil.changesetdisplayer

    def display(other, chlist, displayer):
        limit = loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get('recover')
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_("%s already exists in the repo\n") % recovernode)
            return
    else:
        msg = _('Recover changesets using: hg backups --recover '
                '<changeset hash>\n\nAvailable backup changesets:')
        ui.status(msg, label="status.removed")

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, pycompat.getcwd())
        source = ui.expandpath(source)
        source, branches = hg.parseurl(source, opts.get('branch'))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _("\nwarning: unable to open bundle %s") % source
            hint = _("\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg)
            ui.warn(hint)
            continue
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo,
                                        other, revs, opts["bundle"],
                                        opts["force"])
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if chlist:
                if recovernode:
                    tr = lock = None
                    try:
                        lock = repo.lock()
                        if scmutil.isrevsymbol(other, recovernode):
                            ui.status(_("Unbundling %s\n") % (recovernode))
                            f = hg.openpath(ui, source)
                            gen = exchange.readbundle(ui, f, source)
                            tr = repo.transaction("unbundle")
                            if not isinstance(gen, bundle2.unbundle20):
                                gen.apply(repo, 'unbundle', 'bundle:' + source)
                            if isinstance(gen, bundle2.unbundle20):
                                bundle2.applybundle(repo, gen, tr,
                                                    source='unbundle',
                                                    url='bundle:' + source)
                            tr.close()
                            break
                    finally:
                        lockmod.release(lock, tr)
                else:
                    backupdate = os.path.getmtime(source)
                    backupdate = time.strftime('%a %H:%M, %Y-%m-%d',
                                                time.localtime(backupdate))
                    ui.status("\n%s\n" % (backupdate.ljust(50)))
                    if not ui.verbose:
                        opts['template'] = verbosetemplate
                    else:
                        ui.status("%s%s\n" % ("bundle:".ljust(13), source))
                    displayer = show_changeset(ui, other, opts, False)
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()
Example 9
def _histedit(ui, repo, state, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # this invocation's goal: one of new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if util.any((outg, revs, freeargs)):
            raise util.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))


    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        state.read()
        if not rules:
            comment = editcomment % (state.parentctx, node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))

        # Recover our old commits if necessary
        if not state.topmost in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            changegroup.addchangegroup(repo, gen, 'histedit',
                                       'bundle:' + backupfile)
            os.remove(backupfile)

        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
Example 10
        def clone(self, remote, heads=[], stream=False):
            supported = True

            if (exchange and hasattr(exchange, '_maybeapplyclonebundle')
                    and remote.capable('clonebundles')):
                supported = False
                self.ui.warn(
                    _('(mercurial client has built-in support for '
                      'bundle clone features; the "bundleclone" '
                      'extension can likely safely be removed)\n'))

                if not self.ui.configbool('experimental', 'clonebundles',
                                          False):
                    self.ui.warn(
                        _('(but the experimental.clonebundles config '
                          'flag is not enabled: enable it before '
                          'disabling bundleclone or cloning from '
                          'pre-generated bundles may not work)\n'))
                    # We assume that presence of the bundleclone extension
                    # means they want clonebundles enabled. Otherwise, why do
                    # they have bundleclone enabled? So silently enable it.
                    ui.setconfig('experimental', 'clonebundles', True)
            elif not remote.capable('bundles'):
                supported = False
                self.ui.debug(_('bundle clone not supported\n'))
            elif heads:
                supported = False
                self.ui.debug(
                    _('cannot perform bundle clone if heads requested\n'))
            elif stream:
                supported = False
                self.ui.debug(
                    _('ignoring bundle clone because stream was '
                      'requested\n'))

            if not supported:
                return super(bundleclonerepo, self).clone(remote,
                                                          heads=heads,
                                                          stream=stream)

            result = remote._call('bundles')

            if not result:
                self.ui.note(_('no bundles available; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote,
                                                          heads=heads,
                                                          stream=stream)

            pyver = sys.version_info
            pyver = (pyver[0], pyver[1], pyver[2])

            hgver = util.version()
            # Discard bit after '+'.
            hgver = hgver.split('+')[0]
            try:
                hgver = tuple([int(i) for i in hgver.split('.')[0:2]])
            except ValueError:
                hgver = (0, 0)

            # Testing backdoors.
            if ui.config('bundleclone', 'fakepyver'):
                pyver = ui.configlist('bundleclone', 'fakepyver')
                pyver = tuple(int(v) for v in pyver)

            if ui.config('bundleclone', 'fakehgver'):
                hgver = ui.configlist('bundleclone', 'fakehgver')
                hgver = tuple(int(v) for v in hgver[0:2])

            entries = []
            snifilteredfrompython = False
            snifilteredfromhg = False

            for line in result.splitlines():
                fields = line.split()
                url = fields[0]
                attrs = {}
                for rawattr in fields[1:]:
                    key, value = rawattr.split('=', 1)
                    attrs[urllib.unquote(key)] = urllib.unquote(value)

                # Filter out SNI entries if we don't support SNI.
                if attrs.get('requiresni') == 'true':
                    skip = False
                    if pyver < (2, 7, 9):
                        # Take this opportunity to inform people they are using an
                        # old, insecure Python.
                        if not snifilteredfrompython:
                            self.ui.warn(
                                _('(your Python is older than 2.7.9 '
                                  'and does not support modern and '
                                  'secure SSL/TLS; please consider '
                                  'upgrading your Python to a secure '
                                  'version)\n'))
                        snifilteredfrompython = True
                        skip = True

                    if hgver < (3, 3):
                        if not snifilteredfromhg:
                            self.ui.warn(
                                _('(your Mercurial is old and does '
                                  'not support modern and secure '
                                  'SSL/TLS; please consider '
                                  'upgrading your Mercurial to 3.3+ '
                                  'which supports modern and secure '
                                  'SSL/TLS)\n'))
                        snifilteredfromhg = True
                        skip = True

                    if skip:
                        self.ui.warn(
                            _('(ignoring URL on server that requires '
                              'SNI)\n'))
                        continue

                entries.append((url, attrs))

            if not entries:
                # Don't fall back to normal clone because we don't want mass
                # fallback in the wild to barrage servers expecting bundle
                # offload.
                raise util.Abort(_('no appropriate bundles available'),
                                 hint=_('you may wish to complain to the '
                                        'server operator'))

            # The configuration is allowed to define lists of preferred
            # attributes and values. If this is present, sort results according
            # to that preference. Otherwise, use manifest order and select the
            # first entry.
            prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
            if prefers:
                prefers = [p.split('=', 1) for p in prefers]

                def compareentry(a, b):
                    aattrs = a[1]
                    battrs = b[1]

                    # Iterate over local preferences.
                    for pkey, pvalue in prefers:
                        avalue = aattrs.get(pkey)
                        bvalue = battrs.get(pkey)

                        # Special case for b is missing attribute and a matches
                        # exactly.
                        if avalue is not None and bvalue is None and avalue == pvalue:
                            return -1

                        # Special case for a missing attribute and b matches
                        # exactly.
                        if bvalue is not None and avalue is None and bvalue == pvalue:
                            return 1

                        # We can't compare unless the attribute is defined on
                        # both entries.
                        if avalue is None or bvalue is None:
                            continue

                        # Same values should fall back to next attribute.
                        if avalue == bvalue:
                            continue

                        # Exact matches come first.
                        if avalue == pvalue:
                            return -1
                        if bvalue == pvalue:
                            return 1

                        # Fall back to next attribute.
                        continue

                    # Entries could not be sorted based on attributes. This
                    # says they are equal, which will fall back to index order,
                    # which is what we want.
                    return 0

                entries = sorted(entries, cmp=compareentry)

            url, attrs = entries[0]

            if not url:
                self.ui.note(
                    _('invalid bundle manifest; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote,
                                                          heads=heads,
                                                          stream=stream)

            self.ui.status(_('downloading bundle %s\n' % url))

            try:
                fh = hgurl.open(self.ui, url)
                # Stream clone data is not changegroup data. Handle it
                # specially.
                if 'stream' in attrs:
                    reqs = set(attrs['stream'].split(','))
                    l = fh.readline()
                    filecount, bytecount = map(int, l.split(' ', 1))
                    self.ui.status(_('streaming all changes\n'))
                    consumev1(self, fh, filecount, bytecount)
                else:
                    if exchange:
                        cg = exchange.readbundle(self.ui, fh, 'stream')
                    else:
                        cg = changegroup.readbundle(fh, 'stream')

                    # Mercurial 3.6 introduced cgNunpacker.apply().
                    # Before that, there was changegroup.addchangegroup().
                    # Before that, there was localrepository.addchangegroup().
                    if hasattr(cg, 'apply'):
                        cg.apply(self, 'bundleclone', url)
                    elif hasattr(changegroup, 'addchangegroup'):
                        changegroup.addchangegroup(self, cg, 'bundleclone',
                                                   url)
                    else:
                        self.addchangegroup(cg, 'bundleclone', url)

                self.ui.status(_('finishing applying bundle; pulling\n'))
                # Maintain compatibility with Mercurial 2.x.
                if exchange:
                    return exchange.pull(self, remote, heads=heads)
                else:
                    return self.pull(remote, heads=heads)

            except (urllib2.HTTPError, urllib2.URLError) as e:
                if isinstance(e, urllib2.HTTPError):
                    msg = _('HTTP error fetching bundle: %s') % str(e)
                else:
                    msg = _('error fetching bundle: %s') % e.reason

                # Don't fall back to regular clone unless explicitly told to.
                if not self.ui.configbool('bundleclone', 'fallbackonerror',
                                          False):
                    raise util.Abort(
                        msg,
                        hint=_('consider contacting the '
                               'server operator if this error persists'))

                self.ui.warn(msg + '\n')
                self.ui.warn(_('falling back to normal clone\n'))

                return super(bundleclonerepo, self).clone(remote,
                                                          heads=heads,
                                                          stream=stream)
Example 11
        def clone(self, remote, heads=[], stream=False):
            supported = True
            if not remote.capable('bundles'):
                supported = False
                self.ui.debug(_('bundle clone not supported\n'))
            elif heads:
                supported = False
                self.ui.debug(_('cannot perform bundle clone if heads requested\n'))
            elif stream:
                supported = False
                self.ui.debug(_('ignoring bundle clone because stream was '
                                'requested\n'))

            if not supported:
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            result = remote._call('bundles')

            if not result:
                self.ui.note(_('no bundles available; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            pyver = sys.version_info
            pyver = (pyver[0], pyver[1], pyver[2])

            # Testing backdoor.
            if ui.config('bundleclone', 'fakepyver'):
                pyver = ui.configlist('bundleclone', 'fakepyver')
                pyver = tuple(int(v) for v in pyver)

            entries = []
            snifiltered = False

            for line in result.splitlines():
                fields = line.split()
                url = fields[0]
                attrs = {}
                for rawattr in fields[1:]:
                    key, value = rawattr.split('=', 1)
                    attrs[urllib.unquote(key)] = urllib.unquote(value)

                # Filter out SNI entries if we don't support SNI.
                if attrs.get('requiresni') == 'true' and pyver < (2, 7, 9):
                    # Take this opportunity to inform people they are using an
                    # old, insecure Python.
                    if not snifiltered:
                        self.ui.warn(_('(ignoring URL on server that requires '
                                       'SNI)\n'))
                        self.ui.warn(_('(your Python is older than 2.7.9 and '
                                       'does not support modern and secure '
                                       'SSL/TLS; please consider upgrading '
                                       'your Python to a secure version)\n'))
                    snifiltered = True
                    continue

                entries.append((url, attrs))

            if not entries:
                # Don't fall back to normal clone because we don't want mass
                # fallback in the wild to barrage servers expecting bundle
                # offload.
                raise util.Abort(_('no appropriate bundles available'),
                                 hint=_('you may wish to complain to the '
                                        'server operator'))

            # The configuration is allowed to define lists of preferred
            # attributes and values. If this is present, sort results according
            # to that preference. Otherwise, use manifest order and select the
            # first entry.
            prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
            if prefers:
                prefers = [p.split('=', 1) for p in prefers]

                def compareentry(a, b):
                    aattrs = a[1]
                    battrs = b[1]

                    # Iterate over local preferences.
                    for pkey, pvalue in prefers:
                        avalue = aattrs.get(pkey)
                        bvalue = battrs.get(pkey)

                        # Special case for b is missing attribute and a matches
                        # exactly.
                        if avalue is not None and bvalue is None and avalue == pvalue:
                            return -1

                        # Special case for a missing attribute and b matches
                        # exactly.
                        if bvalue is not None and avalue is None and bvalue == pvalue:
                            return 1

                        # We can't compare unless the attribute is defined on
                        # both entries.
                        if avalue is None or bvalue is None:
                            continue

                        # Same values should fall back to next attribute.
                        if avalue == bvalue:
                            continue

                        # Exact matches come first.
                        if avalue == pvalue:
                            return -1
                        if bvalue == pvalue:
                            return 1

                        # Fall back to next attribute.
                        continue

                    # Entries could not be sorted based on attributes. This
                    # says they are equal, which will fall back to index order,
                    # which is what we want.
                    return 0

                entries = sorted(entries, cmp=compareentry)

            url, attrs = entries[0]

            if not url:
                self.ui.note(_('invalid bundle manifest; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            self.ui.status(_('downloading bundle %s\n' % url))

            try:
                fh = hgurl.open(self.ui, url)
                # Stream clone data is not changegroup data. Handle it
                # specially.
                if 'stream' in attrs:
                    reqs = set(attrs['stream'].split(','))
                    applystreamclone(self, reqs, fh)
                else:
                    if exchange:
                        cg = exchange.readbundle(self.ui, fh, 'stream')
                    else:
                        cg = changegroup.readbundle(fh, 'stream')

                    if hasattr(changegroup, 'addchangegroup'):
                        changegroup.addchangegroup(self, cg, 'bundleclone', url)
                    else:
                        self.addchangegroup(cg, 'bundleclone', url)

                self.ui.status(_('finishing applying bundle; pulling\n'))
                # Maintain compatibility with Mercurial 2.x.
                if exchange:
                    return exchange.pull(self, remote, heads=heads)
                else:
                    return self.pull(remote, heads=heads)

            except (urllib2.HTTPError, urllib2.URLError) as e:
                if isinstance(e, urllib2.HTTPError):
                    msg = _('HTTP error fetching bundle: %s') % str(e)
                else:
                    msg = _('error fetching bundle: %s') % e.reason

                # Don't fall back to regular clone unless explicitly told to.
                if not self.ui.configbool('bundleclone', 'fallbackonerror', False):
                    raise util.Abort(msg, hint=_('consider contacting the '
                        'server operator if this error persists'))

                self.ui.warn(msg + '\n')
                self.ui.warn(_('falling back to normal clone\n'))

                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)
Example 12
def strip(ui, repo, nodelist, backup=True, topic='backup'):

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") % vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo,
                            savebases,
                            saveheads,
                            node,
                            'temp',
                            compress=False)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        except:  # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(
                        _('error removing %s: %s\n') %
                        (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
Example 13
        def clone(self, remote, heads=[], stream=False):
            supported = True

            if (exchange and hasattr(exchange, '_maybeapplyclonebundle')
                    and remote.capable('clonebundles')):
                supported = False
                self.ui.warn(_('(mercurial client has built-in support for '
                               'bundle clone features; the "bundleclone" '
                               'extension can likely safely be removed)\n'))

                if not self.ui.configbool('experimental', 'clonebundles', False):
                    self.ui.warn(_('(but the experimental.clonebundles config '
                                   'flag is not enabled: enable it before '
                                   'disabling bundleclone or cloning from '
                                   'pre-generated bundles may not work)\n'))
                    # We assume that presence of the bundleclone extension
                    # means they want clonebundles enabled. Otherwise, why do
                    # they have bundleclone enabled? So silently enable it.
                    ui.setconfig('experimental', 'clonebundles', True)
            elif not remote.capable('bundles'):
                supported = False
                self.ui.debug(_('bundle clone not supported\n'))
            elif heads:
                supported = False
                self.ui.debug(_('cannot perform bundle clone if heads requested\n'))
            elif stream:
                supported = False
                self.ui.debug(_('ignoring bundle clone because stream was '
                                'requested\n'))

            if not supported:
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            result = remote._call('bundles')

            if not result:
                self.ui.note(_('no bundles available; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            pyver = sys.version_info
            pyver = (pyver[0], pyver[1], pyver[2])

            hgver = util.version()
            # Discard bit after '+'.
            hgver = hgver.split('+')[0]
            try:
                hgver = tuple([int(i) for i in hgver.split('.')[0:2]])
            except ValueError:
                hgver = (0, 0)

            # Testing backdoors.
            if ui.config('bundleclone', 'fakepyver'):
                pyver = ui.configlist('bundleclone', 'fakepyver')
                pyver = tuple(int(v) for v in pyver)

            if ui.config('bundleclone', 'fakehgver'):
                hgver = ui.configlist('bundleclone', 'fakehgver')
                hgver = tuple(int(v) for v in hgver[0:2])

            entries = []
            snifilteredfrompython = False
            snifilteredfromhg = False

            for line in result.splitlines():
                fields = line.split()
                url = fields[0]
                attrs = {}
                for rawattr in fields[1:]:
                    key, value = rawattr.split('=', 1)
                    attrs[urllib.unquote(key)] = urllib.unquote(value)

                # Filter out SNI entries if we don't support SNI.
                if attrs.get('requiresni') == 'true':
                    skip = False
                    if pyver < (2, 7, 9):
                        # Take this opportunity to inform people they are using an
                        # old, insecure Python.
                        if not snifilteredfrompython:
                            self.ui.warn(_('(your Python is older than 2.7.9 '
                                           'and does not support modern and '
                                           'secure SSL/TLS; please consider '
                                           'upgrading your Python to a secure '
                                           'version)\n'))
                        snifilteredfrompython = True
                        skip = True

                    if hgver < (3, 3):
                        if not snifilteredfromhg:
                            self.ui.warn(_('(your Mercurial is old and does '
                                           'not support modern and secure '
                                           'SSL/TLS; please consider '
                                           'upgrading your Mercurial to 3.3+ '
                                           'which supports modern and secure '
                                           'SSL/TLS)\n'))
                        snifilteredfromhg = True
                        skip = True

                    if skip:
                        self.ui.warn(_('(ignoring URL on server that requires '
                                       'SNI)\n'))
                        continue

                entries.append((url, attrs))

            if not entries:
                # Don't fall back to normal clone because we don't want mass
                # fallback in the wild to barrage servers expecting bundle
                # offload.
                raise util.Abort(_('no appropriate bundles available'),
                                 hint=_('you may wish to complain to the '
                                        'server operator'))

            # The configuration is allowed to define lists of preferred
            # attributes and values. If this is present, sort results according
            # to that preference. Otherwise, use manifest order and select the
            # first entry.
            prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
            if prefers:
                prefers = [p.split('=', 1) for p in prefers]

                def compareentry(a, b):
                    aattrs = a[1]
                    battrs = b[1]

                    # Iterate over local preferences.
                    for pkey, pvalue in prefers:
                        avalue = aattrs.get(pkey)
                        bvalue = battrs.get(pkey)

                        # Special case for b is missing attribute and a matches
                        # exactly.
                        if avalue is not None and bvalue is None and avalue == pvalue:
                            return -1

                        # Special case for a missing attribute and b matches
                        # exactly.
                        if bvalue is not None and avalue is None and bvalue == pvalue:
                            return 1

                        # We can't compare unless the attribute is defined on
                        # both entries.
                        if avalue is None or bvalue is None:
                            continue

                        # Same values should fall back to next attribute.
                        if avalue == bvalue:
                            continue

                        # Exact matches come first.
                        if avalue == pvalue:
                            return -1
                        if bvalue == pvalue:
                            return 1

                        # Fall back to next attribute.
                        continue

                    # Entries could not be sorted based on attributes. This
                    # says they are equal, which will fall back to index order,
                    # which is what we want.
                    return 0

                entries = sorted(entries, cmp=compareentry)

            url, attrs = entries[0]

            if not url:
                self.ui.note(_('invalid bundle manifest; using normal clone\n'))
                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)

            self.ui.status(_('downloading bundle %s\n' % url))

            try:
                fh = hgurl.open(self.ui, url)
                # Stream clone data is not changegroup data. Handle it
                # specially.
                if 'stream' in attrs:
                    reqs = set(attrs['stream'].split(','))
                    l = fh.readline()
                    filecount, bytecount = map(int, l.split(' ', 1))
                    self.ui.status(_('streaming all changes\n'))
                    consumev1(self, fh, filecount, bytecount)
                else:
                    if exchange:
                        cg = exchange.readbundle(self.ui, fh, 'stream')
                    else:
                        cg = changegroup.readbundle(fh, 'stream')

                    # Mercurial 3.6 introduced cgNunpacker.apply().
                    # Before that, there was changegroup.addchangegroup().
                    # Before that, there was localrepository.addchangegroup().
                    if hasattr(cg, 'apply'):
                        cg.apply(self, 'bundleclone', url)
                    elif hasattr(changegroup, 'addchangegroup'):
                        changegroup.addchangegroup(self, cg, 'bundleclone', url)
                    else:
                        self.addchangegroup(cg, 'bundleclone', url)

                self.ui.status(_('finishing applying bundle; pulling\n'))
                # Maintain compatibility with Mercurial 2.x.
                if exchange:
                    return exchange.pull(self, remote, heads=heads)
                else:
                    return self.pull(remote, heads=heads)

            except (urllib2.HTTPError, urllib2.URLError) as e:
                if isinstance(e, urllib2.HTTPError):
                    msg = _('HTTP error fetching bundle: %s') % str(e)
                else:
                    msg = _('error fetching bundle: %s') % e.reason

                # Don't fall back to regular clone unless explicitly told to.
                if not self.ui.configbool('bundleclone', 'fallbackonerror', False):
                    raise util.Abort(msg, hint=_('consider contacting the '
                        'server operator if this error persists'))

                self.ui.warn(msg + '\n')
                self.ui.warn(_('falling back to normal clone\n'))

                return super(bundleclonerepo, self).clone(remote, heads=heads,
                        stream=stream)
Example 14
def strip(ui, repo, nodelist, backup=True, topic='backup'):

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
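
Should the stripped changesets ever be needed again, the backup bundle written above can be re-applied with the same readbundle machinery the function uses for its temporary bundle. A minimal sketch under that assumption; `restorebackup` is a hypothetical helper name, and the bundle2-versus-changegroup split simply mirrors the dispatch in the example:

from mercurial import bundle2, changegroup, exchange


def restorebackup(repo, backupfile):
    # Re-apply a backup bundle previously written by strip().
    vfs = repo.vfs
    f = vfs.open(backupfile, "rb")
    try:
        gen = exchange.readbundle(repo.ui, f, backupfile, vfs)
        if isinstance(gen, bundle2.unbundle20):
            tr = repo.transaction('restorebackup')
            try:
                bundle2.processbundle(repo, gen, lambda: tr)
                tr.close()
            finally:
                tr.release()
        else:
            changegroup.addchangegroup(repo, gen, 'restorebackup',
                                       'bundle:' + vfs.join(backupfile))
    finally:
        f.close()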
Example n. 18
0
def _histedit(ui, repo, state, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # goal of this invocation: new, continue, edit-plan, or abort
    if force and not outg:
        raise error.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if any((outg, revs, freeargs)):
            raise error.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise error.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise error.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise error.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # experimental config: histedit.defaultrev
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise error.Abort(
                    _('histedit requires exactly one ancestor revision'))


    replacements = []
    state.keep = opts.get('keep', False)
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        state.read()
        if not rules:
            comment = editcomment % (node.short(state.parentctxnode),
                                     node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        try:
            state.read()
            tmpnodes, leafs = newnodestoabort(state)
            ui.debug('restore wc to old parent %s\n'
                    % node.short(state.topmost))

            # Recover our old commits if necessary
            if state.topmost not in repo and state.backupfile:
                backupfile = repo.join(state.backupfile)
                f = hg.openpath(ui, backupfile)
                gen = exchange.readbundle(ui, f, backupfile)
                tr = repo.transaction('histedit.abort')
                try:
                    if isinstance(gen, bundle2.unbundle20):
                        bundle2.applybundle(repo, gen, tr,
                                            source='histedit',
                                            url='bundle:' + backupfile)
                    else:
                        gen.apply(repo, 'histedit', 'bundle:' + backupfile)
                    tr.close()
                finally:
                    tr.release()

                os.remove(backupfile)

            # check whether we should update away
            if repo.unfiltered().revs('parents() and (%n or %ln::)',
                                    state.parentctxnode, leafs | tmpnodes):
                hg.clean(repo, state.topmost)
            cleanupnode(ui, repo, 'created', tmpnodes)
            cleanupnode(ui, repo, 'temp', leafs)
        except Exception:
            if state.inprogress():
                ui.warn(_('warning: encountered an exception during histedit '
                    '--abort; the repository may not have been completely '
                    'cleaned up\n'))
            raise
        finally:
            state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise error.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, state.keep)
        if not revs:
            raise error.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
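    # e.g. a plan of [('pick', a), ('fold', b), ('fold', c)] becomes
    # [('pick', a), ('_multifold', b), ('fold', c)]: only the last fold in a
    # run keeps the plain 'fold' action, so the user sees a single editor for
    # the combined commit message rather than one per folded changeset.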
    rules = state.rules[:]
    for idx, ((action, ha), (nextact, unused)) in enumerate(
            zip(rules, rules[1:] + [(None, None)])):
        if action == 'fold' and nextact == 'fold':
            state.rules[idx] = '_multifold', ha

    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if supportsmarkers:
        # Only create markers if the temp nodes weren't already removed.
        obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes)
                                       if t in repo))
    else:
        cleanupnode(ui, repo, 'temp', tmpnodes)

    if not state.keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if supportsmarkers:
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
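
Across the examples, the recurring recipe is the same: open the bundle file, hand it to exchange.readbundle, then branch on whether a bundle2 unbundler or a plain changegroup came back. A condensed sketch of that recipe as the abort path above applies it, assuming `ui`, `repo` and a `bundlepath` on disk; `applybackup` is a hypothetical name for illustration only:

from mercurial import bundle2, exchange, hg


def applybackup(ui, repo, bundlepath):
    # Read a bundle from disk and apply it inside a single transaction.
    f = hg.openpath(ui, bundlepath)
    try:
        gen = exchange.readbundle(ui, f, bundlepath)
        tr = repo.transaction('applybackup')
        try:
            if isinstance(gen, bundle2.unbundle20):
                bundle2.applybundle(repo, gen, tr,
                                    source='applybackup',
                                    url='bundle:' + bundlepath)
            else:
                gen.apply(repo, 'applybackup', 'bundle:' + bundlepath)
            tr.close()
        finally:
            tr.release()
    finally:
        f.close()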