Example #1
def startfrom(ui, repo, opts):
    base, dest = 'null', 'tip'
    if opts.get('bookmark'):
        dest = opts.get('bookmark')
    if opts.get('base'):
        base = opts['base']
        if opts.get('bookmark') not in repo:
            dest = base

    basectx = scmutil.revsingle(repo, base)
    destctx = scmutil.revsingle(repo, dest)
    ctx = list(repo.set("""
        last(
          %n::%n and (
             extra(p4changelist) or
             extra(p4fullimportbasechangelist)))""",
             basectx.node(), destctx.node()))
    if ctx:
        ctx = ctx[0]
        startcl = lastcl(ctx)
        ui.note(_('incremental import from changelist: %d, node: %s\n') %
                (startcl, short(ctx.node())))
        if ctx.node() == basectx.node():
            ui.note(_('creating branchpoint, base %s\n') %
                    short(basectx.node()))
            return ctx, startcl, True
        return ctx, startcl, False
    raise error.Abort(_('no valid p4 changelist number.'))
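All of these examples center on scmutil.revsingle, which resolves a user-supplied revision identifier or revset to a single changectx. A minimal sketch of that contract (illustration only, assuming an already-open localrepo object named repo and a Python 3 Mercurial where revision specs are bytes):

from mercurial import scmutil

# Resolve a symbolic name, hash prefix, or revset to exactly one changectx.
ctx = scmutil.revsingle(repo, b'tip')
print(ctx.rev(), ctx.hex())  # local revision number and full node hash

# The optional third argument is the fallback used when the spec is falsy;
# it defaults to b'.', the working directory parent.
ctx = scmutil.revsingle(repo, None, b'tip')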
Example #2
def handle_entries_for_push(web, samepush, p):
    '''Displays pushlog changelist entries for a single push

    The main use of this function is to ensure the first changeset
    for a given push receives extra required information, namely
    the information needed to populate the `mergerollup` and `push`
    fields. These fields are only present on the first changeset in
    a push, and show the number of changesets merged in this push
    (if the push was a merge) and the user who pushed the change,
    respectively.
    '''
    pushcount = len(samepush)

    pushid, user, date, node = samepush.popleft()
    ctx = scmutil.revsingle(web.repo, node)
    multiple_parents = len([c for c in ctx.parents() if c.node() != nullid]) > 1
    mergehidden = "hidden" if multiple_parents else ""

    # Yield the initial entry, which contains special information such as
    # the number of changesets merged in this push
    yield create_entry(ctx, web, pushid, user, date, node, mergehidden, p,
                       pushcount=pushcount)

    # Yield all other entries for the given push
    for pushid, user, date, node in samepush:
        ctx = scmutil.revsingle(web.repo, node)
        yield create_entry(ctx, web, pushid, user, date, node, mergehidden, p,
                           pushcount=None)
Example #3
def isancestorwebcommand(web):
    """Determine whether a changeset is an ancestor of another."""
    req = web.req
    for k in (b'head', b'node'):
        if k not in req.qsparams:
            raise ErrorResponse(HTTP_NOT_FOUND, b"missing parameter '%s'" % k)

    head = req.qsparams[b'head']
    node = req.qsparams[b'node']

    try:
        headctx = scmutil.revsingle(web.repo, head)
    except error.RepoLookupError:
        raise ErrorResponse(HTTP_NOT_FOUND, b'unknown head revision %s' % head)

    try:
        testctx = scmutil.revsingle(web.repo, node)
    except error.RepoLookupError:
        raise ErrorResponse(HTTP_NOT_FOUND, b'unknown node revision %s' % node)

    testrev = testctx.rev()
    isancestor = False

    for rev in web.repo.changelog.ancestors([headctx.rev()], inclusive=True):
        if rev == testrev:
            isancestor = True
            break

    return web.sendtemplate(b'isancestor',
                            headnode=headctx.hex(),
                            testnode=testctx.hex(),
                            isancestor=isancestor)
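The ancestor walk in this example can also be phrased as a single revset query. A hedged alternative sketch, assuming the headctx and testctx contexts resolved above (the ancestors() revset includes the given changeset itself, matching inclusive=True):

isancestor = bool(web.repo.revs(
    b'%d and ancestors(%d)', testctx.rev(), headctx.rev()))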
Example #4
def mozbuildinfocommand(ui, repo, *paths, **opts):
    # This module imports modules not available to the hgweb virtualenv.
    # Delay importing it so it doesn't interfere with operation outside the
    # moz.build evaluation context.
    import mozhg.mozbuildinfo as mozbuildinfo

    if opts['pipemode']:
        data = json.loads(ui.fin.read())

        repo = hg.repository(ui, path=data['repo'])
        ctx = scmutil.revsingle(repo, bytes(data['node']))

        paths = data['paths']
    else:
        ctx = scmutil.revsingle(repo, bytes(opts['rev']))

    try:
        d = mozbuildinfo.filesinfo(repo, ctx, paths=paths)
    except Exception as e:
        d = {'error': 'Exception reading moz.build info: %s' % str(e)}

    if not d:
        d = {'error': 'no moz.build info available'}

    # TODO send data to templater.
    # Use stable output and indentation to make testing easier.
    ui.write(json.dumps(d, indent=2, sort_keys=True))
    ui.write('\n')
    return
Example #5
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
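Note the calling pattern scmutil.revsingle(repo, rev1, rev1) used by the perf commands: the revision doubles as its own fallback, so a falsy spec resolves via repo[rev1] rather than silently falling back to the default '.' (the working directory parent). A one-line illustration under that reading:

ctx = scmutil.revsingle(repo, rev1)        # falsy rev1 -> repo['.']
ctx = scmutil.revsingle(repo, rev1, rev1)  # falsy rev1 -> repo[rev1]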
Example #6
def perfpathcopies(ui, repo, rev1, rev2):
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
Example #7
def _masterrev(repo, masterrevset):
    try:
        master = scmutil.revsingle(repo, masterrevset)
    except error.RepoLookupError:
        master = scmutil.revsingle(repo, _masterrevset(repo.ui, repo, ''))
    except error.Abort:  # empty revision set
        return None

    if master:
        return master.rev()
    return None
Example #8
def children(ui, repo, file_=None, **opts):
    """show the children of the given or working directory revision

    Print the children of the working directory's revisions. If a
    revision is given via -r/--rev, the children of that revision will
    be printed. If a file argument is given, the revision in which the
    file was last changed (after the working directory revision or the
    argument to --rev if given) is printed.

    Please use :hg:`log` instead::

        hg children => hg log -r "children(.)"
        hg children -r REV => hg log -r "children(REV)"

    See :hg:`help log` and :hg:`help revsets.children`.

    """
    opts = pycompat.byteskwargs(opts)
    rev = opts.get(b'rev')
    ctx = scmutil.revsingle(repo, rev)
    if file_:
        fctx = repo.filectx(file_, changeid=ctx.rev())
        childctxs = [fcctx.changectx() for fcctx in fctx.children()]
    else:
        childctxs = ctx.children()

    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    for cctx in childctxs:
        displayer.show(cctx)
    displayer.close()
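The deprecation note in the docstring maps directly onto revset queries, which are also available from the Python API. A hedged sketch of the equivalents, assuming an open repo and REV bound to a revision name as bytes:

# hg children         ->  hg log -r "children(.)"
for rev in repo.revs(b'children(.)'):
    print(repo[rev])

# hg children -r REV  ->  hg log -r "children(REV)"
for rev in repo.revs(b'children(%s)', REV):
    print(repo[rev])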
Example #9
def _rebase(orig, ui, repo, **opts):
    if not opts.get('date') and not ui.configbool('tweakdefaults',
                                                  'rebasekeepdate'):
        opts['date'] = currentdate()

    if opts.get('continue') or opts.get('abort') or opts.get('restack'):
        return orig(ui, repo, **opts)

    # 'hg rebase' w/o args should do nothing
    if not opts.get('dest'):
        raise error.Abort("you must specify a destination (-d) for the rebase")

    # 'hg rebase' can fast-forward bookmark
    prev = repo['.']
    dest = scmutil.revsingle(repo, opts.get('dest'))

    # Only fast-forward the bookmark if no source nodes were explicitly
    # specified.
    if not (opts.get('base') or opts.get('source') or opts.get('rev')):
        common = dest.ancestor(prev)
        if prev == common:
            result = hg.update(repo, dest.node())
            if bmactive(repo):
                with repo.wlock():
                    bookmarks.update(repo, [prev.node()], dest.node())
            return result

    return orig(ui, repo, **opts)
Example #10
def _filterfetchpaths(repo, paths):
    """return a subset of paths whose history is long and need to fetch linelog
    from the server. works with remotefilelog and non-remotefilelog repos.
    """
    threshold = repo.ui.configint('fastannotate', 'clientfetchthreshold', 10)
    if threshold <= 0:
        return paths

    master = repo.ui.config('fastannotate', 'mainbranch') or 'default'

    if 'remotefilelog' in repo.requirements:
        ctx = scmutil.revsingle(repo, master)
        f = lambda path: len(ctx[path].ancestormap())
    else:
        f = lambda path: len(repo.file(path))

    result = []
    for path in paths:
        try:
            if f(path) >= threshold:
                result.append(path)
        except Exception: # file not found etc.
            result.append(path)

    return result
Example #11
def perfmanifest(ui, repo, rev):
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifest._mancache.clear()
        repo.manifest._cache = None
        repo.manifest.read(t)
    timer(d)
Example #12
def perfmanifest(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifestlog.clearcaches()
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
Example #13
def resolveonto(repo, ontoarg):
    try:
        if ontoarg != donotrebasemarker:
            return scmutil.revsingle(repo, ontoarg)
    except error.RepoLookupError:
        # Probably a new bookmark. Leave onto as None to not do any rebasing
        pass
    # onto is None means don't do rebasing
    return None
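resolveonto shows the usual defensive pattern around revsingle: a spec that does not resolve raises error.RepoLookupError rather than returning None, so callers that want optional behavior must catch it. A compact sketch of the same idea, assuming bookmarkname holds untrusted user input:

from mercurial import error, scmutil

try:
    onto = scmutil.revsingle(repo, bookmarkname)
except error.RepoLookupError:
    onto = None  # e.g. a bookmark that does not exist yet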
Example #14
def perfmanifest(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifest.clearcaches()
        repo.manifest.read(t)
    timer(d)
    fm.end()
Example #15
def gverify(ui, repo, **opts):
    '''verify that a Mercurial rev matches the corresponding Git rev

    Given a Mercurial revision that has a corresponding Git revision in the map,
    this attempts to answer whether that revision has the same contents as the
    corresponding Git revision.

    '''
    ctx = scmutil.revsingle(repo, opts.get('rev'), '.')
    return verify.verify(ui, repo, ctx)
Example #16
def perfmanifest(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifest._mancache.clear()
        repo.manifest._cache = None
        repo.manifest.read(t)
    timer(d)
    fm.end()
Example #17
def merge(ui, repoagent, *pats, **opts):
    """merge wizard"""
    from tortoisehg.hgqt import merge as mergemod
    rev = opts.get('rev') or None
    if not rev and len(pats):
        rev = pats[0]
    if not rev:
        raise util.Abort(_('Merge revision not specified or not found'))
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, rev).rev()
    return mergemod.MergeDialog(repoagent, rev)
Example #18
def fileview(ui, repo, **opts):
    revCtx = scmutil.revsingle(repo, opts["revision"])
    subrepos = {}
    if not opts["disableSubRepositoryDetection"]:
        subrepos = collect_sub_repositories(revCtx)
    printer = File_Printer(ui, repo, revCtx, opts["disableLastCommit"],
                           opts["transport"])
    viewer = File_Viewer(revCtx, printer)
    viewer.recursive = opts["recursive"]
    viewer.sub_repositories = subrepos
    viewer.view(opts["path"])
Example #19
def bookmark(ui, repoagent, *names, **opts):
    """add or remove a movable marker"""
    from tortoisehg.hgqt import bookmark as bookmarkmod
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, opts.get('rev')).rev()
    if len(names) > 1:
        raise util.Abort(_('only one new bookmark name allowed'))
    dlg = bookmarkmod.BookmarkDialog(repoagent, rev)
    if names:
        dlg.setBookmarkName(hglib.tounicode(names[0]))
    return dlg
Example #20
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1, ) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo,
                                 opts.get('output'),
                                 ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
Example #21
def debugbuildannotatecache(ui, repo, *pats, **opts):
    """incrementally build fastannotate cache up to REV for specified files

    If REV is not specified, use the config 'fastannotate.mainbranch'.

    If fastannotate.client is True, download the annotate cache from the
    server. Otherwise, build the annotate cache locally.

    The annotate cache will be built using the default diff and follow
    options and lives in '.hg/fastannotate/default'.
    """
    opts = pycompat.byteskwargs(opts)
    rev = opts.get(b'REV') or ui.config(b'fastannotate', b'mainbranch')
    if not rev:
        raise error.Abort(
            _(b'you need to provide a revision'),
            hint=_(b'set fastannotate.mainbranch or use --rev'),
        )
    if ui.configbool(b'fastannotate', b'unfilteredrepo'):
        repo = repo.unfiltered()
    ctx = scmutil.revsingle(repo, rev)
    m = scmutil.match(ctx, pats, opts)
    paths = list(ctx.walk(m))
    if util.safehasattr(repo, 'prefetchfastannotate'):
        # client
        if opts.get(b'REV'):
            raise error.Abort(_(b'--rev cannot be used for client'))
        repo.prefetchfastannotate(paths)
    else:
        # server, or full repo
        progress = ui.makeprogress(_(b'building'), total=len(paths))
        for i, path in enumerate(paths):
            progress.update(i)
            with facontext.annotatecontext(repo, path) as actx:
                try:
                    if actx.isuptodate(rev):
                        continue
                    actx.annotate(rev, rev)
                except (faerror.CannotReuseError, faerror.CorruptedFileError):
                    # the cache is broken (could happen with renaming so the
                    # file history gets invalidated). rebuild and try again.
                    ui.debug(b'fastannotate: %s: rebuilding broken cache\n' %
                             path)
                    actx.rebuild()
                    try:
                        actx.annotate(rev, rev)
                    except Exception as ex:
                        # possibly a bug, but should not stop us from building
                        # cache for other files.
                        ui.warn(
                            _(b'fastannotate: %s: failed to '
                              b'build cache: %r\n') % (path, ex))
        progress.complete()
Example #22
def perfmergecalculate(ui, repo, rev):
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False,
                               acceptremote=True)
    timer(d)
Example #23
def feedentrygenerator(_context, entries, repo, url, urlbase):
    """Generator of mappings for pushlog feed entries field
    """
    for pushid, user, date, node in entries:
        ctx = scmutil.revsingle(repo, node)
        filesgen = [{'name': fn} for fn in ctx.files()]
        yield {
            'node': node,
            'date': isotime(date),
            'user': xmlescape(user),
            'urlbase': urlbase,
            'url': url,
            'files': templateutil.mappinglist(filesgen),
        }
Example #24
def filelog(ui, repoagent, *pats, **opts):
    """show history of the specified file"""
    from tortoisehg.hgqt import filedialogs
    if len(pats) != 1:
        raise util.Abort(_('requires a single filename'))
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, opts.get('rev')).rev()
    filename = hglib.canonpaths(pats)[0]
    if opts.get('compare'):
        dlg = filedialogs.FileDiffDialog(repoagent, filename)
    else:
        dlg = filedialogs.FileLogDialog(repoagent, filename)
    dlg.goto(rev)
    return dlg
Example #25
def debugbuildannotatecache(ui, repo, *pats, **opts):
    """incrementally build fastannotate cache up to REV for specified files

    If REV is not specified, use the config 'fastannotate.mainbranch'.

    If fastannotate.client is True, download the annotate cache from the
    server. Otherwise, build the annotate cache locally.

    The annotate cache will be built using the default diff and follow
    options and lives in '.hg/fastannotate/default'.
    """
    rev = opts.get('REV') or ui.config('fastannotate', 'mainbranch')
    if not rev:
        raise error.Abort(_('you need to provide a revision'),
                          hint=_('set fastannotate.mainbranch or use --rev'))
    if ui.configbool('fastannotate', 'unfilteredrepo', True):
        repo = repo.unfiltered()
    ctx = scmutil.revsingle(repo, rev)
    m = scmutil.match(ctx, pats, opts)
    paths = list(ctx.walk(m))
    if util.safehasattr(repo, 'prefetchfastannotate'):
        # client
        if opts.get('REV'):
            raise error.Abort(_('--rev cannot be used for client'))
        repo.prefetchfastannotate(paths)
    else:
        # server, or full repo
        for i, path in enumerate(paths):
            ui.progress(_('building'), i, total=len(paths))
            with facontext.annotatecontext(repo, path) as actx:
                try:
                    if actx.isuptodate(rev):
                        continue
                    actx.annotate(rev, rev)
                except (faerror.CannotReuseError, faerror.CorruptedFileError):
                    # the cache is broken (could happen with renaming so the
                    # file history gets invalidated). rebuild and try again.
                    ui.debug('fastannotate: %s: rebuilding broken cache\n'
                             % path)
                    actx.rebuild()
                    try:
                        actx.annotate(rev, rev)
                    except Exception as ex:
                        # possibly a bug, but should not stop us from building
                        # cache for other files.
                        ui.warn(_('fastannotate: %s: failed to '
                                  'build cache: %r\n') % (path, ex))
        # clear the progress bar
        ui.write()
Example #26
def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts):
    if not path:
        raise error.Abort(_(b'must specify file path to censor'))
    if not rev:
        raise error.Abort(_(b'must specify revision to censor'))

    wctx = repo[None]

    m = scmutil.match(wctx, (path,))
    if m.anypats() or len(m.files()) != 1:
        raise error.Abort(_(b'can only specify an explicit filename'))
    path = m.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise error.Abort(_(b'cannot censor file with no history'))

    rev = scmutil.revsingle(repo, rev, rev).rev()
    try:
        ctx = repo[rev]
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)

    try:
        fctx = ctx.filectx(path)
    except error.LookupError:
        raise error.Abort(_(b'file does not exist at revision %s') % rev)

    fnode = fctx.filenode()
    heads = []
    for headnode in repo.heads():
        hc = repo[headnode]
        if path in hc and hc.filenode(path) == fnode:
            heads.append(hc)
    if heads:
        headlist = b', '.join([short(c.node()) for c in heads])
        raise error.Abort(
            _(b'cannot censor file in heads (%s)') % headlist,
            hint=_(b'clean/delete and commit first'),
        )

    wp = wctx.parents()
    if ctx.node() in [p.node() for p in wp]:
        raise error.Abort(
            _(b'cannot censor working directory'),
            hint=_(b'clean/delete/update first'),
        )

    with repo.transaction(b'censor') as tr:
        flog.censorrevision(tr, fnode, tombstone=tombstone)
Example #27
def backout(ui, repoagent, *pats, **opts):
    """backout tool"""
    from tortoisehg.hgqt import backout as backoutmod
    if opts.get('rev'):
        rev = opts.get('rev')
    elif len(pats) == 1:
        rev = pats[0]
    else:
        rev = 'tip'
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, rev).rev()
    msg = backoutmod.checkrev(repo, rev)
    if msg:
        raise util.Abort(hglib.fromunicode(msg))
    return backoutmod.BackoutDialog(repoagent, rev)
Example #28
def perfmergecalculate(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
Example #29
def gverify(ui, repo, **opts):
    '''verify that a Mercurial rev matches the corresponding Git rev

    Given a Mercurial revision that has a corresponding Git revision in the
    map, this attempts to answer whether that revision has the same contents as
    the corresponding Git revision.

    '''

    if opts.get('fsck'):
        for badsha, e in porcelain.fsck(repo.githandler.git):
            raise error.Abort(b'git repository is corrupt!')

    ctx = scmutil.revsingle(repo, opts.get('rev'), b'.')
    return verify.verify(ui, repo, ctx)
Example #30
def close(ui, repo, branch=None, **opts):
    '''close a branch without updating to the branch
    
    Takes a branch and closes it with a commit message.

    This is extremely helpful in large repositories, where updating to
    different branches can take seconds.
    '''
    # check repository
    if not repo:
        raise util.Abort(
            _("there is no Mercurial repository here "
              "(.hg not found)"))
    # check for a branch
    if not branch:
        raise util.Abort('no branch provided')

    # get message
    m = opts.get('message')

    # get current context
    originalctx = repo[None]

    # the current working directory might have two parents (merge scenario)
    # check for a single parent and then pick correct parent
    if len(originalctx.parents()) != 1:
        raise util.Abort('current directory has an outstanding merge')
    originalctx = originalctx.parents()[0]

    # get branch change context
    branchctx = scmutil.revsingle(repo, branch)

    # move to other node and branch
    commands.debugsetparents(ui, repo, branchctx.rev())
    old_quiet = ui.quiet
    ui.quiet = True
    commands.branch(ui, repo, label=branchctx.branch())
    ui.quiet = old_quiet

    # commit close node
    commands.commit(ui, repo, close_branch=True, message=m, exclude="*")

    # switch back to original
    commands.debugsetparents(ui, repo, originalctx.rev())
    old_quiet = ui.quiet
    ui.quiet = True
    commands.branch(ui, repo, label=originalctx.branch())
    ui.quiet = old_quiet
Example #31
def rebaseorfastforward(orig, ui, repo, dest, **args):
    """Wrapper for rebasemodule.rebase that fast-forwards the working directory
    and any active bookmark to the rebase destination if there is actually
    nothing to rebase.
    """
    prev = repo['.']
    destrev = scmutil.revsingle(repo, dest)
    common = destrev.ancestor(prev)
    if prev == common and destrev != prev:
        result = hg.update(repo, destrev.node())
        if bmactive(repo):
            with repo.wlock():
                bookmarks.update(repo, [prev.node()], destrev.node())
        ui.status(_("nothing to rebase - fast-forwarded to %s\n") % dest)
        return result
    return orig(ui, repo, dest=dest, **args)
Example #32
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded')  % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
Example #33
def annotate(ui, repoagent, *pats, **opts):
    """annotate dialog"""
    from tortoisehg.hgqt import fileview
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, opts.get('rev')).rev()
    dlg = _filelog(ui, repoagent, *pats, **opts)
    dlg.setFileViewMode(fileview.AnnMode)
    dlg.goto(rev)
    if opts.get('line'):
        try:
            lineno = int(opts['line'])
        except ValueError:
            raise util.Abort(_('invalid line number: %s') % opts['line'])
        dlg.showLine(lineno)
    if opts.get('pattern'):
        dlg.setSearchPattern(hglib.tounicode(opts['pattern']))
    return dlg
Example #34
def manifest(ui, repoagent, *pats, **opts):
    """display the current or given revision of the project manifest"""
    from tortoisehg.hgqt import revdetails as revdetailsmod
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, opts.get('rev')).rev()
    dlg = revdetailsmod.createManifestDialog(repoagent, rev)
    if pats:
        path = hglib.canonpaths(pats)[0]
        dlg.setFilePath(hglib.tounicode(path))
        if opts.get('line'):
            try:
                lineno = int(opts['line'])
            except ValueError:
                raise util.Abort(_('invalid line number: %s') % opts['line'])
            dlg.showLine(lineno)
    if opts.get('pattern'):
        dlg.setSearchPattern(hglib.tounicode(opts['pattern']))
    return dlg
Example #35
def commitcmd(orig, ui, repo, *pats, **opts):
    if (opts.get("amend")
            and not opts.get("date")
            and not opts.get("to")
            and not ui.configbool('tweakdefaults', 'amendkeepdate')):
        opts["date"] = currentdate()

    rev = opts.get('reuse_message')
    if rev:
        invalidargs = ['message', 'logfile']
        currentinvalidargs = [ia for ia in invalidargs if opts.get(ia)]
        if currentinvalidargs:
            raise error.Abort(_('--reuse-message and --%s are '
                'mutually exclusive') % (currentinvalidargs[0]))

    if rev:
        opts['message'] = scmutil.revsingle(repo, rev).description()

    return orig(ui, repo, *pats, **opts)
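The --reuse-message branch relies on revsingle returning a full changectx, whose description() is the stored commit message. A minimal sketch, assuming rev names an existing changeset:

opts['message'] = scmutil.revsingle(repo, rev).description()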
Example #36
    def write_tag(self, name, node):
        branch = self.repo[node].branch()
        # Calling self.repo.tag() doesn't append the tag to the correct
        # commit. So I copied some of localrepo._tag into here.
        # But that method, like much of mercurial's code, is ugly.
        # So I then rewrote it.

        tags_revision = revsingle(self.repo,
                                  hghex(branch_tip(self.repo, branch)))
        if '.hgtags' in tags_revision:
            old_tags = tags_revision['.hgtags'].data()
        else:
            old_tags = ''
        newtags = [old_tags]
        if old_tags and old_tags[-1] != '\n':
            newtags.append('\n')

        encoded_tag = encoding.fromlocal(name)
        tag_line = '%s %s' % (hghex(node), encoded_tag)
        if tag_line in old_tags:
            return  # Don't commit a tag that was previously committed
        newtags.append(tag_line)

        def get_filectx(repo, memctx, file):
            return memfilectx(file, ''.join(newtags))

        if name in self.parsed_tags:
            author, message = self.parsed_tags[name]
            user, date, tz = author
            date_tz = (date, tz)
        else:
            message = "Added tag %s for changeset %s" % (name, hgshort(node))
            user = self.hgrc.get("ui", "username", None)
            date_tz = None  # XXX insert current date here
        ctx = memctx(self.repo,
                     (branch_tip(self.repo, branch), self.NULL_PARENT),
                     message, ['.hgtags'], get_filectx, user, date_tz,
                     {'branch': branch})

        tmp = encoding.encoding
        encoding.encoding = 'utf-8'
        node = self.repo.commitctx(ctx)
        encoding.encoding = tmp
Example #37
    def write_tag(self, ref):
        node = self.parsed_refs[ref]
        tag = git_to_hg_spaces(ref[len('refs/tags/'):])
        branch = self.repo[node].branch()
        # Calling self.repo.tag() doesn't append the tag to the correct
        # commit. So I copied some of localrepo._tag into here.
        # But that method, like much of mercurial's code, is ugly.
        # So I then rewrote it.

        tags_revision = revsingle(self.repo, hghex(branch_tip(self.repo, branch)))
        if '.hgtags' in tags_revision:
            old_tags = tags_revision['.hgtags'].data()
        else:
            old_tags = ''
        newtags = [old_tags]
        if old_tags and old_tags[-1] != '\n':
            newtags.append('\n')

        encoded_tag = encoding.fromlocal(tag)
        tag_line = '%s %s' % (hghex(node), encoded_tag)
        if tag_line in old_tags:
            return  # Don't commit a tag that was previously committed
        newtags.append(tag_line)

        def get_filectx(repo, memctx, file):
            return memfilectx(file, ''.join(newtags))

        if tag in self.parsed_tags:
            author, message = self.parsed_tags[tag]
            user, date, tz = author
            date_tz = (date, tz)
        else:
            message = "Added tag %s for changeset %s" % (tag, hgshort(node))
            user = None
            date_tz = None
        ctx = memctx(self.repo,
            (branch_tip(self.repo, branch), self.NULL_PARENT), message,
            ['.hgtags'], get_filectx, user, date_tz, {'branch': branch})

        tmp = encoding.encoding
        encoding.encoding = 'utf-8'
        node = self.repo.commitctx(ctx)
        encoding.encoding = tmp
Example #38
def manifest(ui, repoagent, *pats, **opts):
    """display the current or given revision of the project manifest"""
    from tortoisehg.hgqt import manifestdialog
    repo = repoagent.rawRepo()
    rev = scmutil.revsingle(repo, opts.get('rev')).rev()
    dlg = manifestdialog.ManifestDialog(repoagent, rev)
    if pats:
        path = hglib.canonpaths(pats)[0]
        if opts.get('line'):
            try:
                lineno = int(opts['line'])
            except ValueError:
                raise util.Abort(_('invalid line number: %s') % opts['line'])
        else:
            lineno = None
        dlg.setSource(hglib.tounicode(path), rev, lineno)
    if opts.get('pattern'):
        dlg.setSearchPattern(hglib.tounicode(opts['pattern']))
    return dlg
Example #39
def catnotate(ui, repo, file1, *args, **opts):
    """output the current or given revision of files annotated with filename
    and line number.

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used.

    Binary files are skipped unless -a/--text option is provided.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + args, opts)
    prefix = ''

    err = 1
    # modified and stripped mercurial.cmdutil.cat follows
    def write(path):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if not opts.get('text') and util.binary(data):
            fp.write("%s: binary file\n" % path)
            return

        for (num, line) in enumerate(data.split("\n"), start=1):
            line = line + "\n"
            fp.write("%s:%s: %s" % (path, num, line))
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        if mfnode and mfl[mfnode].find(file)[0]:
            write(file)
            return 0

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    return err
Example #40
def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
    """generate paths matching given patterns"""
    perfhack = repo.ui.configbool(b'fastannotate', b'perfhack')

    # disable perfhack if:
    # a) any walkopt is used
    # b) if we treat pats as plain file names, some of them do not have
    #    corresponding linelog files
    if perfhack:
        # cwd relative to reporoot
        reporoot = os.path.dirname(repo.path)
        reldir = os.path.relpath(encoding.getcwd(), reporoot)
        if reldir == b'.':
            reldir = b''
        if any(opts.get(o[1]) for o in commands.walkopts):  # a)
            perfhack = False
        else:  # b)
            relpats = [
                os.path.relpath(p, reporoot) if os.path.isabs(p) else p
                for p in pats
            ]
            # disable perfhack on '..' since it allows escaping from the repo
            if any((b'..' in f or not os.path.isfile(
                    facontext.pathhelper(repo, f, aopts).linelogpath))
                   for f in relpats):
                perfhack = False

    # perfhack: emit paths directly without checking against the manifest;
    # this can be incorrect if the rev does not have the file.
    if perfhack:
        for p in relpats:
            yield os.path.join(reldir, p)
    else:

        def bad(x, y):
            raise error.Abort(b"%s: %s" % (x, y))

        ctx = scmutil.revsingle(repo, rev)
        m = scmutil.match(ctx, pats, opts, badfn=bad)
        for p in ctx.walk(m):
            yield p
Example #41
def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
    """generate paths matching given patterns"""
    perfhack = repo.ui.configbool('fastannotate', 'perfhack')

    # disable perfhack if:
    # a) any walkopt is used
    # b) if we treat pats as plain file names, some of them do not have
    #    corresponding linelog files
    if perfhack:
        # cwd relative to reporoot
        reporoot = os.path.dirname(repo.path)
        reldir = os.path.relpath(os.getcwd(), reporoot)
        if reldir == '.':
            reldir = ''
        if any(opts.get(o[1]) for o in commands.walkopts): # a)
            perfhack = False
        else: # b)
            relpats = [os.path.relpath(p, reporoot) if os.path.isabs(p) else p
                       for p in pats]
            # disable perfhack on '..' since it allows escaping from the repo
            if any(('..' in f or
                    not os.path.isfile(
                        facontext.pathhelper(repo, f, aopts).linelogpath))
                   for f in relpats):
                perfhack = False

    # perfhack: emit paths directly without checking against the manifest;
    # this can be incorrect if the rev does not have the file.
    if perfhack:
        for p in relpats:
            yield os.path.join(reldir, p)
    else:
        def bad(x, y):
            raise error.Abort("%s: %s" % (x, y))
        ctx = scmutil.revsingle(repo, rev)
        m = scmutil.match(ctx, pats, opts, badfn=bad)
        for p in ctx.walk(m):
            yield p
Example #42
def prepushrebasehooks(op, params, bundle, bundlefile):
    onto = params.get('onto')
    prelockonto = resolveonto(op.repo, onto or donotrebasemarker)
    prelockontonode = prelockonto.hex() if prelockonto else None

    # Allow running hooks on the new commits before we take the lock
    prelockrebaseargs = op.hookargs.copy()
    prelockrebaseargs['source'] = 'push'
    prelockrebaseargs['bundle2'] = '1'
    prelockrebaseargs['node'] = scmutil.revsingle(bundle,
                                                  'min(bundle())').hex()
    prelockrebaseargs['node_onto'] = prelockontonode
    if onto:
        prelockrebaseargs['onto'] = onto
    prelockrebaseargs['hook_bundlepath'] = bundlefile

    for path in op.records[treepackrecords]:
        if ':' in path:
            raise RuntimeError(_("tree pack path may not contain colon (%s)") %
                               path)
    prelockrebaseargs['hook_packpaths'] = ':'.join(op.records[treepackrecords])

    op.repo.hook("prepushrebase", throw=True, **prelockrebaseargs)
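Note that revsingle accepts any repository-like object that supports revsets, including the temporary bundle repository built for the incoming push; the 'min(bundle())' revset selects the oldest changeset carried by the bundle. A hedged sketch, assuming bundle overlays the incoming changegroup:

first = scmutil.revsingle(bundle, 'min(bundle())')
node = first.hex()  # hash of the first incoming changeset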
Example #43
def bundle2rebase(op, part):
    '''unbundle a bundle2 containing a changegroup to rebase'''

    params = part.params
    tr = op.gettransaction()
    hookargs = dict(tr.hookargs)

    bundlefile = None
    onto = scmutil.revsingle(op.repo, params['onto'])
    if not params['newhead']:
        if not op.repo.revs('%r and head()', params['onto']):
            raise util.Abort(_('rebase would produce a new head on server'))

    try: # guards bundlefile
        bundlefile = _makebundlefile(part)
        bundle = bundlerepository(op.repo.ui, op.repo.root, bundlefile)
        revs = _getrevs(bundle, onto)

        op.repo.hook("prechangegroup", **hookargs)

        replacements = {}
        added = []

        for rev in revs:
            newrev = _graft(op.repo, onto, rev)
            onto = op.repo[newrev]
            replacements[rev.node()] = onto.node()
            added.append(onto.node())
        _buildobsolete(replacements, bundle, op.repo)
    finally:
        try:
            if bundlefile:
                os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
Example #44
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing " b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse " b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
            ui.warn(
                b"(shared store does not belong to pooled storage; "
                b"deleting destination to improve efficiency)\n"
            )
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(
            b"(dest has an active working directory lock; assuming it is "
            b"left over from a previous process and that the destination "
            b"is corrupt; deleting it just to be sure)\n"
        )
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(
                b"reached maximum number of network attempts; " b"giving up\n"
            )

        ui.warn(
            b"(retrying after network failure on attempt %d of %d)\n"
            % (networkattempts[0], networkattemptlimit)
        )

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(_(b"unexpected response from remote server:")):
                ui.warn(b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(
                    b"unhandled URLError; reason type: %s; value: %s\n"
                    % (e.reason.__class__.__name__, e.reason)
                )
        else:
            ui.warn(
                b"unhandled exception during network operation; type: %s; "
                b"value: %s\n" % (e.__class__.__name__, e)
            )

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone " b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(
            b"(shared store has an active lock; assuming it is left "
            b"over from a previous process and that the store is "
            b"corrupt; deleting store and destination just to be "
            b"sure)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(
            b"(shared store missing requires file; this is a really "
            b"odd failure; deleting store and destination)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(
                b"(shared store missing requirements: %s; deleting "
                b"store and destination to ensure optimal behavior)\n"
                % b", ".join(sorted(missing))
            )
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires", "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remote-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(
                    b"(remote resolved %s to %s; "
                    b"result is not deterministic)\n" % (branch, checkoutrevision)
                )

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purgeext = extensions.find(b"purge")

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, ""
                    )

            with timeit("purge", "purge"):
                if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{"print": None, "print0": None, "dirs": None, "files": None}
                ):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(
                b"sparse profile %s does not exist at revision "
                b"%s" % (sparse_profile, checkoutrevision)
            )

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(
                repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
            )
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
            ui.write(
                b"(sparse profile %s already set; no need to update "
                b"sparse config)\n" % sparse_profile
            )
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(
                    b"(replacing existing sparse config with profile "
                    b"%s)\n" % sparse_profile
                )
            else:
                ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(
                            repo, [], [], [sparse_profile], force=True
                        ),
                    )
                )

                repo.ui.status(
                    b"%d files added, %d files dropped, "
                    b"%d files conflicting\n" % tuple(fcounts)
                )

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
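
The checkout above retries itself by calling `callself()` whenever `handlepullerror()` classifies an exception as transient (network hiccups, or a corrupt shared store that was just deleted). A minimal, self-contained sketch of that pattern, with illustrative names rather than the extension's real API:

def run_with_retries(operation, is_transient, max_attempts=3):
    """Run operation(), retrying while is_transient(exc) says the
    failure is recoverable and attempts remain."""
    for attempt in range(1, max_attempts + 1):
        try:
            return operation()
        except Exception as exc:
            if attempt == max_attempts or not is_transient(exc):
                raise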
Example No. 56
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command edits changesets between ANCESTOR and the parent of
    the working directory.

    With --outgoing, this edits changesets not found in the
    destination repository. If URL of the destination is omitted, the
    'default-push' (or 'default') path will be used.

    For safety, this command is also aborted if there are ambiguous
    outgoing revisions which may confuse users: for example, if there
    are multiple branches containing outgoing revisions.

    Use "min(outgoing() and ::.)" or a similar revset specification
    instead of --outgoing to specify the edit target revision exactly
    in such an ambiguous situation. See :hg:`help revsets` for details
    about selecting revisions.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, "mq", None)
    if mq and mq.applied:
        raise util.Abort(_("source has mq patches applied"))

    # basic argument incompatibility processing
    outg = opts.get("outgoing")
    cont = opts.get("continue")
    abort = opts.get("abort")
    force = opts.get("force")
    rules = opts.get("commands", "")
    revs = opts.get("rev", [])
    goal = "new"  # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_("--force only allowed with --outgoing"))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --continue"))
        goal = "continue"
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --abort"))
        goal = "abort"
    else:
        if os.path.exists(os.path.join(repo.path, "histedit-state")):
            raise util.Abort(_("history edit already in progress, try " "--continue or --abort"))
        if outg:
            if revs:
                raise util.Abort(_("no revisions allowed with --outgoing"))
            if len(freeargs) > 1:
                raise util.Abort(_("only one repo argument allowed with --outgoing"))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(_("histedit requires exactly one ancestor revision"))

    if goal == "continue":
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        parentctx = repo[parentctxnode]
        parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
        replacements.extend(repl)
    elif goal == "abort":
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
        ui.debug("restore wc to old parent %s\n" % node.short(topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, topmost)
                break
        cleanupnode(ui, repo, "created", tmpnodes)
        cleanupnode(ui, repo, "temp", leafs)
        os.unlink(os.path.join(repo.path, "histedit-state"))
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            root = revs[0]
            root = scmutil.revsingle(repo, root).node()

        keep = opts.get("keep", False)
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_("%s is not an ancestor of working directory") % node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = "\n".join([makedesc(c) for c in ctxs])
            rules += "\n\n"
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join("histedit-last-edit.txt"), "w")
            f.write(rules)
            f.close()
        else:
            if rules == "-":
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines()) if l and not l.startswith("#")]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        keep = opts.get("keep", False)
        replacements = []

    while rules:
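        # Persist progress before each action so a later --continue or
        # --abort can resume from the interrupted state.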
        writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
        action, ha = rules.pop(0)
        ui.debug("histedit: processing %s %s\n" % (action, ha))
        actfunc = actiontable[action]
        parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
        replacements.extend(replacement_)

    hg.update(repo, parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug("histedit: %s is dropped\n" % node.short(prec))
            else:
                ui.debug("histedit: %s is replaced by %s\n" % (node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = "histedit:                            %s"
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, topmost, ntm)
            # TODO update mq state
        if obsolete._enabled:
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec], tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, "replaced", mapping)

    cleanupnode(ui, repo, "temp", tmpnodes)
    os.unlink(os.path.join(repo.path, "histedit-state"))
    if os.path.exists(repo.sjoin("undo")):
        os.unlink(repo.sjoin("undo"))
Example No. 57
def _docensor(ui, repo, path, rev='', tombstone='', **opts):
    if not path:
        raise error.Abort(_('must specify file path to censor'))
    if not rev:
        raise error.Abort(_('must specify revision to censor'))

    wctx = repo[None]

    m = scmutil.match(wctx, (path,))
    if m.anypats() or len(m.files()) != 1:
        raise error.Abort(_('can only specify an explicit filename'))
    path = m.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise error.Abort(_('cannot censor file with no history'))

    rev = scmutil.revsingle(repo, rev, rev).rev()
    try:
        ctx = repo[rev]
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

    try:
        fctx = ctx.filectx(path)
    except error.LookupError:
        raise error.Abort(_('file does not exist at revision %s') % rev)

    fnode = fctx.filenode()
    headctxs = [repo[c] for c in repo.heads()]
    heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
    if heads:
        headlist = ', '.join([short(c.node()) for c in heads])
        raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
            hint=_('clean/delete and commit first'))

    wp = wctx.parents()
    if ctx.node() in [p.node() for p in wp]:
        raise error.Abort(_('cannot censor working directory'),
            hint=_('clean/delete/update first'))

    flogv = flog.version & 0xFFFF
    if flogv != revlog.REVLOGNG:
        raise error.Abort(
            _('censor does not support revlog version %d') % (flogv,))

    tombstone = filelog.packmeta({"censored": tombstone}, "")

    crev = fctx.filerev()

    if len(tombstone) > flog.rawsize(crev):
        raise error.Abort(_(
            'censor tombstone must be no longer than censored data'))

    # Using two files instead of one makes it easy to rewrite entry-by-entry
    idxread = repo.svfs(flog.indexfile, 'r')
    idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
    if flog.version & revlog.REVLOGNGINLINEDATA:
        dataread, datawrite = idxread, idxwrite
    else:
        dataread = repo.svfs(flog.datafile, 'r')
        datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)

    # Copy all revlog data up to the entry to be censored.
    rio = revlog.revlogio()
    offset = flog.start(crev)

    for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
        idxwrite.write(chunk)
    for chunk in util.filechunkiter(dataread, limit=offset):
        datawrite.write(chunk)

    def rewriteindex(r, newoffs, newdata=None):
        """Rewrite the index entry with a new data offset and optional new data.

        The newdata argument, if given, is a tuple of three positive integers:
        (new compressed, new uncompressed, added flag bits).
        """
        offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
        flags = revlog.gettype(offlags)
        if newdata:
            comp, uncomp, nflags = newdata
            flags |= nflags
        offlags = revlog.offset_type(newoffs, flags)
        e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
        idxwrite.write(rio.packentry(e, None, flog.version, r))
        idxread.seek(rio.size, 1)

    def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
        """Write the given full text to the filelog with the given data offset.

        Returns:
            The integer number of data bytes written, for tracking data offsets.
        """
        flag, compdata = flog.compress(data)
        newcomp = len(flag) + len(compdata)
        rewriteindex(r, offs, (newcomp, len(data), nflags))
        datawrite.write(flag)
        datawrite.write(compdata)
        dataread.seek(flog.length(r), 1)
        return newcomp

    # Rewrite censored revlog entry with (padded) tombstone data.
    pad = ' ' * (flog.rawsize(crev) - len(tombstone))
    offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)

    # Rewrite all following filelog revisions fixing up offsets and deltas.
    for srev in xrange(crev + 1, len(flog)):
        if crev in flog.parentrevs(srev):
            # Immediate children of censored node must be re-added as fulltext.
            try:
                revdata = flog.revision(srev)
            except error.CensoredNodeError as e:
                revdata = e.tombstone
            dlen = rewrite(srev, offset, revdata)
        else:
            # Copy any other revision data verbatim after fixing up the offset.
            rewriteindex(srev, offset)
            dlen = flog.length(srev)
            for chunk in util.filechunkiter(dataread, limit=dlen):
                datawrite.write(chunk)
        offset += dlen

    idxread.close()
    idxwrite.close()
    if dataread is not idxread:
        dataread.close()
        datawrite.close()
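
The tombstone written above must occupy exactly the raw size of the censored revision, so the byte offsets of every later filelog entry stay valid after the in-place rewrite. A small sketch of that padding invariant:

def pad_tombstone(tombstone, rawsize):
    """Pad the tombstone with spaces to exactly rawsize bytes, as the
    censor code above does; a longer tombstone cannot fit in place."""
    if len(tombstone) > rawsize:
        raise ValueError(
            "censor tombstone must be no longer than censored data")
    return tombstone + b" " * (rawsize - len(tombstone))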
Example No. 58
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.

    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # Mapping between the old revision id and either what is the new rebased
    # revision or what needs to be done with the old revision. The state dict
    # will be what contains most of the rebase progress state.
    state = {}
    skipped = set()
    targetancestors = set()


    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if collapsemsg and not collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise error.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            if srcf and basef:
                raise error.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise error.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise error.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if destf:
                dest = scmutil.revsingle(repo, destf)
            else:
                dest = repo[_destrebase(repo)]
                destf = str(dest)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return _nothingtorebase()
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return _nothingtorebase()

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise error.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            obsoletenotrebased = {}
            if ui.configbool('experimental', 'rebaseskipobsolete'):
                rebasesetrevs = set(rebaseset)
                obsoletenotrebased = _computeobsoletenotrebased(repo,
                                                                rebasesetrevs,
                                                                dest)

                # - plain prune (no successor) changesets are rebased
                # - split changesets are not rebased if at least one of the
                # changeset resulting from the split is an ancestor of dest
                rebaseset = rebasesetrevs - set(obsoletenotrebased)
            result = buildstate(repo, dest, rebaseset, collapsef,
                                obsoletenotrebased)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return _nothingtorebase()

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                 % repo[root],
                                 hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise error.Abort(_('cannot collapse multiple named '
                            'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target], inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform, **opts)
                    newnode = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                           editor=editor,
                                           keepbranches=keepbranchesf)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        skipped.add(rev)
                    state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif state[rev] == revprecursor:
                targetctx = repo[obsoletenotrebased[rev]]
                desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx,
                             targetctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as %s\n')
                ui.status(msg % (desc, desctarget))
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newnode = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor,
                                   keepbranches=keepbranchesf)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
                repo['.'].node() == repo._bookmarks[activebookmark]):
            bookmarks.activate(repo, activebookmark)

    finally:
        release(lock, wlock)
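
Rebase progress is driven entirely by the `state` mapping above: each source revision maps either to its rebased revision or to a negative sentinel (revtodo, nullmerge, revignored, revprecursor in this version of the extension). A hedged sketch of reading such a mapping back, with illustrative sentinel values standing in for the real constants:

REVTODO, NULLMERGE, REVIGNORED, REVPRECURSOR = -1, -2, -3, -4  # stand-ins

def summarize_state(state):
    """Split a rebase state mapping into finished, pending, and skipped
    revisions, mirroring how the main loop dispatches on state[rev]."""
    done = dict((old, new) for old, new in state.items() if new >= 0)
    pending = sorted(old for old, new in state.items() if new == REVTODO)
    skipped = sorted(old for old, new in state.items()
                     if new in (NULLMERGE, REVIGNORED, REVPRECURSOR))
    return done, pending, skipped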
Example No. 59
def overriderevert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
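
The revert override above works in terms of largefile "standins": small placeholder files tracked in history under .hglf/ whose contents identify the real large file. A simplified sketch of the path convention the lfutil helpers are assumed to implement:

SHORTNAME = ".hglf"

def standin(filename):
    """Return the repo-relative path of the standin for filename."""
    return SHORTNAME + "/" + filename

def isstandin(filename):
    return filename.startswith(SHORTNAME + "/")

def splitstandin(filename):
    """Return the largefile path for a standin path, or None."""
    if isstandin(filename):
        return filename[len(SHORTNAME) + 1:]
    return None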
Example No. 60
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''

    revs = opts.get('rev')
    change = opts.get('change')
    do3way = '$parent2' in cmdline

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    subrepos = opts.get('subrepos')

    matcher = scmutil.match(repo[node2], pats, opts)

    if opts.get('patch'):
        if subrepos:
            raise error.Abort(_('--patch cannot be used with --subrepos'))
        if node2 is None:
            raise error.Abort(_('--patch requires two revisions'))
    else:
        mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
                                                   listsubrepos=subrepos)[:3])
        if do3way:
            mod_b, add_b, rem_b = map(set,
                                      repo.status(node1b, node2, matcher,
                                                  listsubrepos=subrepos)[:3])
        else:
            mod_b, add_b, rem_b = set(), set(), set()
        modadd = mod_a | add_a | mod_b | add_b
        common = modadd | rem_a | rem_b
        if not common:
            return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        if not opts.get('patch'):
            # Always make a copy of node1a (and node1b, if applicable)
            dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                             subrepos)[0]
            rev1a = '@%d' % repo[node1a].rev()
            if do3way:
                dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                 subrepos)[0]
                rev1b = '@%d' % repo[node1b].rev()
            else:
                dir1b = None
                rev1b = ''

            fns_and_mtime = []

            # If node2 is not the wc or there is >1 change, copy it
            dir2root = ''
            rev2 = ''
            if node2:
                dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
                rev2 = '@%d' % repo[node2].rev()
            elif len(common) > 1:
                # We only actually need to get the files to copy back to
                # the working dir in this case (because the other cases
                # are: diffing 2 revisions or a single file -- in which
                # case the file is already directly passed to the diff
                # tool).
                dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot,
                                               subrepos)
            else:
                # This lets the diff tool open the changed file directly
                dir2 = ''
                dir2root = repo.root

            label1a = rev1a
            label1b = rev1b
            label2 = rev2

            # If only one change, diff the files instead of the directories
            # Handle bogus modifies correctly by checking if the files exist
            if len(common) == 1:
                common_file = util.localpath(common.pop())
                dir1a = os.path.join(tmproot, dir1a, common_file)
                label1a = common_file + rev1a
                if not os.path.isfile(dir1a):
                    dir1a = os.devnull
                if do3way:
                    dir1b = os.path.join(tmproot, dir1b, common_file)
                    label1b = common_file + rev1b
                    if not os.path.isfile(dir1b):
                        dir1b = os.devnull
                dir2 = os.path.join(dir2root, dir2, common_file)
                label2 = common_file + rev2
        else:
            template = 'hg-%h.patch'
            cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
                           template=repo.vfs.reljoin(tmproot, template),
                           match=matcher)
            label1a = cmdutil.makefilename(repo, template, node1a)
            label2 = cmdutil.makefilename(repo, template, node2)
            dir1a = repo.vfs.reljoin(tmproot, label1a)
            dir2 = repo.vfs.reljoin(tmproot, label2)
            dir1b = None
            label1b = None
            fns_and_mtime = []

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                   'plabel1': label1a, 'plabel2': label1b,
                   'clabel': label2, 'child': dir2,
                   'root': repo.root}
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot)

        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
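
The command line handed to the external tool is built by substituting $parent1, $parent2, $child, and the label placeholders with quoted snapshot paths. A self-contained sketch of that substitution, with naive quoting standing in for util.shellquote:

import re

def expand_cmdline(cmdline, replace, do3way):
    """Expand $parent/$child style placeholders, dropping $parent2 when
    not diffing 3-way, as the quote()/re.sub logic above does. The
    replace dict maps placeholder names to their substitutions."""
    regex = (r'''(['"]?)([^\s'"$]*)'''
             r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
    def quote(match):
        pre, key = match.group(2), match.group(3)
        if not do3way and key == 'parent2':
            return pre
        return pre + '"%s"' % replace[key]
    if not do3way and not re.search(regex, cmdline):
        cmdline += ' $parent1 $child'
    return re.sub(regex, quote, cmdline)

For example, expand_cmdline('mydiff', {'parent1': '/tmp/a', 'child': '/tmp/b'}, False) yields 'mydiff "/tmp/a" "/tmp/b"'.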