Code example #1
    def filectxfn(repo, memctx, path):
        try:
            fctx = oldctx.filectx(path)

            # This wonky pattern is copied from memctx.__init__.
            copied = fctx.renamed()
            if copied:
                copied = copied[0]

            # isexec and islink didn't exist until Mercurial 3.2.
            islink = b'l' in fctx.flags()
            isexec = b'x' in fctx.flags()

            # TRACKING hg45 memctx argument was renamed to changectx and
            # converted from a named argument to positional argument in 4.5.
            spec = pycompat.getargspec(context.memfilectx.__init__)

            if 'changectx' in spec.args:
                # TRACKING hg50
                # `copied` renamed to `copysource`
                if util.versiontuple(n=2) >= (5, 0):
                    return context.memfilectx(repo,
                                              memctx,
                                              path,
                                              fctx.data(),
                                              islink=islink,
                                              isexec=isexec,
                                              copysource=copied)
                else:
                    return context.memfilectx(repo,
                                              memctx,
                                              path,
                                              fctx.data(),
                                              islink=islink,
                                              isexec=isexec,
                                              copied=copied)
            else:
                # TRACKING hg50
                # `copied` renamed to `copysource`
                if util.versiontuple(n=2) >= (5, 0):
                    return context.memfilectx(repo,
                                              path,
                                              fctx.data(),
                                              islink=islink,
                                              isexec=isexec,
                                              copysource=copied,
                                              memctx=memctx)
                else:
                    return context.memfilectx(repo,
                                              path,
                                              fctx.data(),
                                              islink=islink,
                                              isexec=isexec,
                                              copied=copied,
                                              memctx=memctx)
        except KeyError:
            return None
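
The dispatch above avoids a direct version comparison where it can: it inspects the signature of memfilectx.__init__ and adapts the call. Below is a minimal standalone sketch of that introspection pattern, assuming only the standard library (inspect.getfullargspec is what pycompat.getargspec resolves to on Python 3); it is an illustration, not Mercurial's own helper.

import inspect

def accepts_argument(func, name):
    # True if `func` declares an argument named `name`.
    return name in inspect.getfullargspec(func).args

def call_compat(func, *args, **kwargs):
    # Drop keyword arguments the target does not declare, so one call
    # site can serve several API revisions. Sketch only: production code
    # should remap renamed arguments rather than silently drop them.
    kwargs = {k: v for k, v in kwargs.items() if accepts_argument(func, k)}
    return func(*args, **kwargs)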
Code example #2
def template_fxheads(*args, **kwargs):
    """:fxheads: List of strings. Firefox trees with heads on this commit."""
    # TRACKING hg46
    if util.versiontuple(n=2) >= (4, 6):
        context, mapping = args
        repo = context.resource(mapping, 'repo')
        ctx = context.resource(mapping, 'ctx')
        cache = context.resource(mapping, 'cache')
    else:
        repo = kwargs['repo']
        ctx = kwargs['ctx']
        cache = kwargs['cache']

    labels = _getcachedlabels(repo, ctx, cache)
    if not labels:
        return []

    res = set(tag for tag, node, tree, uri in labels if node == ctx.node())
    sortedres = sorted(res)

    # TRACKING hg47
    if templateutil:
        return templateutil.hybridlist(sortedres, 'log.tag')
    else:
        return sortedres
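
The util.versiontuple(n=2) >= (4, 6) gate above relies on ordinary Python tuple ordering. A rough, self-contained model of that comparison (a deliberate simplification of Mercurial's real parser, shown only to illustrate the gating):

def versiontuple2(version):
    # Take the first two dotted components, e.g. '4.6.2' -> (4, 6).
    major, minor = version.split('.')[:2]
    return int(major), int(minor)

assert versiontuple2('4.6.2') >= (4, 6)
assert versiontuple2('4.5.1') < (4, 6)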
Code example #3
    def filectxfn(repo, memctx, path):
        sourcepath = path[len(prefix):]
        if sourcepath not in sourceman:
            return None

        node, flags = sourceman.find(sourcepath)
        sourcefl = sourcerepo.file(sourcepath)
        data = sourcefl.read(node)

        islink = b'l' in flags
        isexec = b'x' in flags

        copied = None
        renamed = sourcefl.renamed(node)
        if renamed:
            copied = b'%s%s' % (prefix, renamed[0])

        # TRACKING hg50 - `copied` renamed to `copysource`
        if util.versiontuple(n=2) >= (5, 0):
            return context.memfilectx(repo,
                                      memctx,
                                      path,
                                      data,
                                      islink=islink,
                                      isexec=isexec,
                                      copysource=copied)
        else:
            return context.memfilectx(repo,
                                      memctx,
                                      path,
                                      data,
                                      islink=islink,
                                      isexec=isexec,
                                      copied=copied)
Code example #4
    def filectxfn(repo, memctx, path):
        try:
            fctx = oldctx.filectx(path)

            # This wonky pattern is copied from memctx.__init__.
            copied = fctx.renamed()
            if copied:
                copied = copied[0]

            # isexec and islink didn't exist until Mercurial 3.2.
            islink = b'l' in fctx.flags()
            isexec = b'x' in fctx.flags()

            # TRACKING hg50
            # `copied` renamed to `copysource`
            if util.versiontuple(n=2) >= (5, 0):
                return context.memfilectx(repo, memctx, path, fctx.data(),
                                          islink=islink,
                                          isexec=isexec,
                                          copysource=copied)
            else:
                return context.memfilectx(repo, memctx, path, fctx.data(),
                                          islink=islink,
                                          isexec=isexec,
                                          copied=copied)
        except KeyError:
            return None
Code example #5
    def createfn(repo, ctx, revmap, filectxfn):
        parents = newparents(repo, ctx, revmap)
        description = ctx.description()
        if not opts['unmodified']:
            description += b'\n%d' % offset[0]
        memctx = context.memctx(repo,
                                parents,
                                description,
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=ctx.extra())
        status = ctx.p1().status(ctx)
        # TRACKING hg53 - status is an object instead of a tuple
        if util.versiontuple(n=2) >= (5, 3):
            memctx.modified = lambda: status.modified
            memctx.added = lambda: status.added
            memctx.removed = lambda: status.removed
        else:
            memctx.modified = lambda: status[0]
            memctx.added = lambda: status[1]
            memctx.removed = lambda: status[2]
        offset[0] += 1

        return memctx
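
The hg53 branch above exists because status() went from returning a plain tuple to returning an object with named fields. A tiny illustration of the two access styles handled there, using a namedtuple stand-in for the status result (an assumption for illustration; Mercurial's actual class differs):

from collections import namedtuple

Status = namedtuple('Status', ['modified', 'added', 'removed'])
status = Status(modified=[b'a.txt'], added=[], removed=[])

assert status.modified is status[0]  # attribute and index agree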
Code example #6
def infowebcommand(web):
    """Get information about the specified changeset(s).

    This is a legacy API from before the days of Mercurial's built-in JSON
    API. It is used by unidentified parts of automation. Over time these
    consumers should transition to the modern/native JSON API.
    """
    req = web.req

    if 'node' not in req.qsparams:
        # TRACKING hg48
        if util.versiontuple(n=2) >= (4, 8):
            return web.sendtemplate('error', error="missing parameter 'node'")
        else:
            return web.sendtemplate('error', error={'error': "missing parameter 'node'"})

    nodes = req.qsparams.getall('node')

    csets = []
    for node in nodes:
        ctx = scmutil.revsymbol(web.repo, node)
        csets.append({
            'rev': ctx.rev(),
            'node': ctx.hex(),
            'user': ctx.user(),
            'date': ctx.date(),
            'description': ctx.description(),
            'branch': ctx.branch(),
            'tags': ctx.tags(),
            'parents': [p.hex() for p in ctx.parents()],
            'children': [c.hex() for c in ctx.children()],
            'files': ctx.files(),
        })

    return web.sendtemplate('info', csets=templateutil.mappinglist(csets))
Code example #7
def supported_hg():
    '''Returns True if the Mercurial version is supported for robustcheckout'''
    return util.versiontuple(n=2) in (
        (4, 3),
        (4, 4),
        (4, 5),
        (4, 6),
        (4, 7),
    )
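
A hypothetical way such a guard might be consumed during extension setup (extsetup is a standard Mercurial extension hook; the warning text here is illustrative, not the extension's actual message):

def extsetup(ui):
    if not supported_hg():
        ui.warn('robustcheckout is not tested with this version of '
                'Mercurial; correctness and performance are not '
                'guaranteed\n')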
Code example #8
File: __init__.py  Project: edunham/vcttools-cinnabar
def _checksecurity(ui, cw, hgversion):
    import ssl

    # Python + Mercurial didn't have terrific TLS handling until Python
    # 2.7.9 and Mercurial 3.4. For this reason, it was recommended to pin
    # certificates in Mercurial config files. In modern versions of
    # Mercurial, the system CA store is used and old, legacy TLS protocols
    # are disabled. The default connection/security setting should
    # be sufficient and pinning certificates is no longer needed.

    hg39 = util.versiontuple(n=2) >= (3, 9)
    modernssl = hasattr(ssl, 'SSLContext')

    def setfingerprints(porting=False):
        # Need to process in sorted order for tests to be deterministic.
        if hg39:
            cw.c.setdefault('hostsecurity', {})
            for k, v in sorted(MODERN_FINGERPRINTS.items()):
                if porting and k not in cw.c.get('hostfingerprints', {}):
                    continue

                cw.c['hostsecurity']['%s:fingerprints' % k] = v
        else:
            cw.c.setdefault('hostfingerprints', {})
            for k, v in sorted(HOST_FINGERPRINTS.items()):
                if porting and k not in cw.c['hostfingerprints']:
                    continue

                cw.c['hostfingerprints'][k] = v

    if not modernssl:
        setfingerprints()

    # We always update fingerprints if they are present. We /could/ offer to
    # remove fingerprints if running modern Python and Mercurial. But that
    # just adds more UI complexity and isn't worth it.
    have_legacy = any(k in cw.c.get('hostfingerprints', {})
                      for k in HOST_FINGERPRINTS)
    have_modern = any('%s:fingerprints' % k in cw.c.get('hostsecurity', {})
                      for k in MODERN_FINGERPRINTS)

    if have_legacy or have_modern:
        setfingerprints(porting=True)

    # If we're using Mercurial 3.9, remove legacy fingerprints if they
    # are present.
    if have_legacy and hg39:
        for k in HOST_FINGERPRINTS:
            try:
                del cw.c['hostfingerprints'][k]
            except KeyError:
                pass

        # Delete empty config section.
        if 'hostfingerprints' in cw.c and not cw.c['hostfingerprints']:
            del cw.c['hostfingerprints']
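
For reference, the two config shapes setfingerprints() writes look roughly like the dictionaries below; the host name and fingerprint values are placeholders, not the real pinned data:

# Mercurial >= 3.9: per-host pinning in the [hostsecurity] section.
modern = {'hostsecurity': {'hg.example.com:fingerprints': 'sha256:aa:bb:cc'}}

# Older Mercurial: the legacy [hostfingerprints] section.
legacy = {'hostfingerprints': {'hg.example.com': 'aa:bb:cc'}}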
Code example #9
def create_entry(ctx, web, pushid, user, date, node, mergehidden, parity, pushcount=None):
    """Creates an entry to be yielded in the `changelist` generator

    `pushcount` will be non-None when we are generating an entry for the first change
    in a given push
    """
    repo = web.repo
    n = ctx.node()
    ctxfiles = ctx.files()
    firstchange = pushcount is not None

    mergerollupval = templateutil.mappinglist(
        [{'count': pushcount}]
        if firstchange and mergehidden == 'hidden'
        else []
    )

    pushval = templateutil.mappinglist(
        [{"date": localdate(date), "user": user}]
        if firstchange
        else []
    )

    # TRACKING hg47
    # Call the function with whichever signature is correct
    if util.versiontuple(n=2) >= (4, 7):
        filediffs = webutil.listfilediffs(ctxfiles, node, len(ctxfiles))
    else:
        filediffs = webutil.listfilediffs(web.tmpl, ctxfiles, node, len(ctxfiles))

    return {
        "author": ctx.user(),
        "desc": ctx.description(),
        "files": filediffs,
        "rev": ctx.rev(),
        "node": hex(n),
        "parents": [c.hex() for c in ctx.parents()],
        "tags": webutil.nodetagsdict(repo, n),
        "branches": webutil.nodebranchdict(repo, ctx),
        "inbranch": webutil.nodeinbranch(repo, ctx),
        "hidden": mergehidden,
        "mergerollup": mergerollupval,
        "id": pushid,
        "parity": parity,
        "push": pushval,
    }
Code example #10
def robustcheckout(ui,
                   url,
                   dest,
                   upstream=None,
                   revision=None,
                   branch=None,
                   purge=False,
                   sharebase=None,
                   networkattempts=None,
                   sparseprofile=None):
    """Ensure a working copy has the specified revision checked out.

    Repository data is automatically pooled into the common directory
    specified by ``--sharebase``, which is a required argument. It is required
    because pooling storage prevents excessive cloning, which makes operations
    complete faster.

    One of ``--revision`` or ``--branch`` must be specified. ``--revision``
    is preferred, as it is deterministic and there is no ambiguity as to which
    revision will actually be checked out.

    If ``--upstream`` is used, the repo at that URL is used to perform the
    initial clone instead of cloning from the repo where the desired revision
    is located.

    ``--purge`` controls whether to remove untracked and ignored files from
    the working directory. If used, the end state of the working directory
    should only contain files explicitly under version control for the requested
    revision.

    ``--sparseprofile`` can be used to specify a sparse checkout profile to use.
    The sparse checkout profile corresponds to a file in the revision to be
    checked out. If a previous sparse profile or config is present, it will be
    replaced by this sparse profile. We choose not to "widen" the sparse config
    so operations are as deterministic as possible. If an existing checkout
    is present and it isn't using a sparse checkout, we error. This is to
    prevent accidentally enabling sparse on a repository that may have
    clients that aren't sparse aware. Sparse checkout support requires Mercurial
    4.3 or newer and the ``sparse`` extension must be enabled.
    """
    if not revision and not branch:
        raise error.Abort('must specify one of --revision or --branch')

    if revision and branch:
        raise error.Abort('cannot specify both --revision and --branch')

    # Require revision to look like a SHA-1.
    if revision:
        if len(revision) < 12 or len(revision) > 40 or not re.match(
                '^[a-f0-9]+$', revision):
            raise error.Abort('--revision must be a SHA-1 fragment 12-40 '
                              'characters long')

    sharebase = sharebase or ui.config('share', 'pool')
    if not sharebase:
        raise error.Abort(
            'share base directory not defined; refusing to operate',
            hint='define share.pool config option or pass --sharebase')

    # Sparse profile support was added in Mercurial 4.3, where it was highly
    # experimental. Because of its fragility, we only support sparse profiles
    # on versions we have explicitly vetted (currently 4.3 through 4.5).
    # We /could/ silently fall back to non-sparse when not supported.
    # However, given that sparse has performance implications, we want to fail
    # fast if we can't satisfy the desired checkout request.
    if sparseprofile:
        if util.versiontuple(n=2) not in ((4, 3), (4, 4), (4, 5)):
            raise error.Abort(
                'sparse profile support only available for '
                'Mercurial versions 4.3 through 4.5 (using %s)' %
                util.version())

        try:
            extensions.find('sparse')
        except KeyError:
            raise error.Abort('sparse extension must be enabled to use '
                              '--sparseprofile')

    ui.warn('(using Mercurial %s)\n' % util.version())

    # worker.backgroundclose only makes things faster if running anti-virus,
    # which our automation doesn't. Disable it.
    ui.setconfig('worker', 'backgroundclose', False)

    # By default the progress bar starts after 3s and updates every 0.1s. We
    # change this so it shows and updates every 1.0s.
    # We also tell progress to assume a TTY is present so updates are printed
    # even if there is no known TTY.
    # We make the config change here instead of in a config file because
    # otherwise we're at the whim of whatever configs are used in automation.
    ui.setconfig('progress', 'delay', 1.0)
    ui.setconfig('progress', 'refresh', 1.0)
    ui.setconfig('progress', 'assume-tty', True)

    sharebase = os.path.realpath(sharebase)

    return _docheckout(ui,
                       url,
                       dest,
                       upstream,
                       revision,
                       branch,
                       purge,
                       sharebase,
                       networkattempts,
                       sparse_profile=sparseprofile)
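
The --revision validation above reduces to a small predicate; a self-contained restatement of the same rule, convenient for testing it in isolation:

import re

def looks_like_sha1_fragment(revision):
    # Mirrors the check in robustcheckout: 12-40 lowercase hex characters.
    return (12 <= len(revision) <= 40
            and re.match('^[a-f0-9]+$', revision) is not None)

assert looks_like_sha1_fragment('abcdef123456')
assert not looks_like_sha1_fragment('tip')
assert not looks_like_sha1_fragment('ABCDEF123456')  # uppercase rejected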
Code example #11
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot enable sparse profile on existing '
            'non-sparse checkout',
            hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot use non-sparse checkout on existing sparse '
            'checkout',
            hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            res = hg.clone(ui, {},
                           clonepeer,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(
                    repo.root, '')

            if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{
                        'print': None,
                        'print0': None,
                        'dirs': None,
                        'files': None
                    }):
                raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if (old_profiles == {sparse_profile}
                and not old_includes and not old_excludes):
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
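
The retry backoff in handlenetworkfailure() can be reproduced in isolation; the jitter bounds below copy the defaults read from the robustcheckout.retryjittermin/retryjittermax config options above:

import random

def backoff_seconds(attempt, jittermin=1000, jittermax=5000):
    # Exponential backoff with a 1.5 multiplier plus random jitter.
    base = (2 ** attempt - 1) * 1.5
    return base + random.randint(jittermin, jittermax) / 1000.0

# attempt 1 -> 2.5-6.5s, attempt 2 -> 5.5-9.5s, attempt 3 -> 11.5-15.5s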
Code example #12
File: __init__.py  Project: edunham/vcttools-cinnabar
def configwizard(ui, repo, statedir=None, **opts):
    """Ensure your Mercurial configuration is up to date."""
    runsteps = set(wizardsteps)

    # Mercurial <1.7 had a bug where monkeypatching ui.__class__
    # during uisetup() doesn't work. So we do our own ui.hasconfig()
    # here. Other uses of ui.hasconfig() are allowed, as they will
    # have a properly monkeypatched ui.__class__.
    if 'steps' in ui._data(False)._data.get('configwizard', {}):
        runsteps = set(ui.configlist('configwizard', 'steps'))

    hgversion = util.versiontuple(n=3)

    if hgversion < MINIMUM_SUPPORTED_VERSION:
        ui.warn(VERSION_TOO_OLD % (
            hgversion[0], hgversion[1],
            MINIMUM_SUPPORTED_VERSION[0], MINIMUM_SUPPORTED_VERSION[1],
        ))
        raise error.Abort('upgrade Mercurial then run again')

    uiprompt(ui, INITIAL_MESSAGE, default='<RETURN>')

    configpaths = [p for p in scmutil.userrcpath() if os.path.exists(p)]
    path = configpaths[0] if configpaths else scmutil.userrcpath()[0]
    cw = configobjwrapper(path)

    if 'hgversion' in runsteps:
        if _checkhgversion(ui, hgversion):
            return 1

    if 'username' in runsteps:
        _checkusername(ui, cw)

    if 'diff' in runsteps:
        _checkdiffsettings(ui, cw)

    if 'color' in runsteps:
        _promptnativeextension(ui, cw, 'color', 'Enable color output to your terminal')

    if 'pager' in runsteps:
        _checkpager(ui, cw)

    if 'curses' in runsteps:
        _checkcurses(ui, cw)

    if 'historyediting' in runsteps:
        _checkhistoryediting(ui, cw)

    if 'fsmonitor' in runsteps:
        _checkfsmonitor(ui, cw, hgversion)

    if 'blackbox' in runsteps:
        _promptnativeextension(ui, cw, 'blackbox',
                               'Enable logging of commands to help diagnose bugs '
                               'and performance problems')

    if 'security' in runsteps:
        _checksecurity(ui, cw, hgversion)

    if 'firefoxtree' in runsteps:
        _promptvctextension(ui, cw, 'firefoxtree', FIREFOXTREE_INFO)

    if 'wip' in runsteps:
        _checkwip(ui, cw)

    if 'codereview' in runsteps:
        _checkcodereview(ui, cw)

    if 'pushtotry' in runsteps:
        _promptvctextension(ui, cw, 'push-to-try', PUSHTOTRY_INFO)

    if 'multiplevct' in runsteps:
        _checkmultiplevct(ui, cw)

    if 'configchange' in runsteps:
        _handleconfigchange(ui, cw)

    if 'permissions' in runsteps:
        _checkpermissions(ui, cw)

    return 0
Code example #13
def replacechangesets(repo, oldnodes, createfn, backuptopic=b'replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file modifications
    are to be performed (a common pattern). The function should return an
    *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is appropriate
    for the repo configuration.

    This function handles locking the repository and performing as many actions
    in a transaction as possible.

    Before any changes are made, we verify the state of the repo is sufficient
    for transformation to occur and abort otherwise.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise error.Abort(b'replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise error.Abort(b'must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise error.Abort(b'cannot call replacechangesets when a transaction '
                          b'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered(b'visible').revs(b'%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if there
    # are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, b'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction(b'replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))

            if not isinstance(newctx, context.memctx):
                raise error.Abort(b'createfn must return a context.memctx')

            if oldctx == newctx:
                raise error.Abort(b'createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise error.Abort(b'we do not allow replacements to modify files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise error.Abort(b'unknown parent of child commit: %s' %
                                  oldctx.hex(),
                                  hint=b'please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)

            # TRACKING hg53 - status is an object instead of a tuple
            if util.versiontuple(n=2) >= (5, 3):
                mctx.modified = lambda: status.modified
                mctx.added = lambda: status.added
                mctx.removed = lambda: status.removed
            else:
                mctx.modified = lambda: status[0]
                mctx.added = lambda: status[1]
                mctx.removed = lambda: status[2]
            ph = repo.ui.config(b'phases', b'new-commit')
            try:
                repo.ui.setconfig(b'phases', b'new-commit', oldctx.phase(),
                                  b'rewriting')
                newnode = mctx.commit()
                revmap[rev] = repo[newnode].rev()
                nodemap[oldctx.node()] = newnode

                # Retain phase.
                adjustphase(repo, tr, oldctx.phase(), newnode)
            finally:
                repo.ui.setconfig(b'phases', b'new-commit', ph)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, b'applychanges'):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode

                repo._bookmarks.recordchange(tr)

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo[b'.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise error.Abort(b'active bookmark changed; '
                              b'this should not occur!',
                              hint=b'please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)

    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
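
For concreteness, here is a minimal createfn matching the contract documented above: it changes only the commit message and reuses the supplied filectx callback so no files are modified. This is a sketch, not code from the module; newparents is the module helper also used in Code example #5.

from mercurial import context

def createfn(repo, ctx, revmap, filectxfn):
    parents = newparents(repo, ctx, revmap)
    return context.memctx(repo, parents,
                          ctx.description() + b' (rewritten)',
                          ctx.files(), filectxfn,
                          user=ctx.user(), date=ctx.date(),
                          extra=ctx.extra())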
Code example #14

def _getcachedlabels(repo, ctx, cache):
    labels = cache.get('fxheads', None)
    if labels is None:
        if isfirefoxrepo(repo):
            labels = list(get_firefoxtrees(repo))
            cache['fxheads'] = labels
        else:
            labels = False
            cache['fxheads'] = False

    return labels

# TRACKING hg46
if util.versiontuple(n=2) >= (4, 6):
    fxheadsdec = templatekeyword('fxheads', requires={'repo', 'ctx', 'cache'})
else:
    fxheadsdec = templatekeyword('fxheads')

@fxheadsdec
def template_fxheads(*args, **kwargs):
    """:fxheads: List of strings. Firefox trees with heads on this commit."""
    # TRACKING hg46
    if util.versiontuple(n=2) >= (4, 6):
        context, mapping = args
        repo = context.resource(mapping, 'repo')
        ctx = context.resource(mapping, 'ctx')
        cache = context.resource(mapping, 'cache')
    else:
        repo = kwargs['repo']
        ctx = kwargs['ctx']
        cache = kwargs['cache']

    labels = _getcachedlabels(repo, ctx, cache)
    if not labels:
        return []

    res = set(tag for tag, node, tree, uri in labels if node == ctx.node())
    sortedres = sorted(res)

    # TRACKING hg47
    if templateutil:
        return templateutil.hybridlist(sortedres, 'log.tag')
    else:
        return sortedres
Code example #15
File: bz.py  Project: dracular/version-control-tools
    def __init__(self, bug, node):
        Attachment.__init__(self, bug, node)
        self.flags = sorted(Flag(bug, n) for n in node.findall('flag'))
        rawtext = base64.b64decode(node.find('data').text)

        # TRACKING hg46 - `patch.extract` is now a context manager
        if util.versiontuple(n=2) >= (4, 6):
            with patch.extract(bug.settings.ui,
                               StringIO.StringIO(rawtext)) as data:
                filename = data.get('filename')
                message = data.get('message')
                user = data.get('user')
                date = data.get('date')
                branch = data.get('branch')
                nodeid = data.get('node')
                p1 = data.get('p1')
                p2 = data.get('p2')

                # for some reason, patch.extract writes a temporary file with the diff hunks
                if filename:
                    try:
                        # BugZilla is not explicit about patch encoding. We need to check it's utf-8.
                        # utf-8: convert from 8-bit encoding to internal (16/32-bit) Unicode.
                        with open(filename) as fp:
                            self.data = fp.read().decode('utf-8')
                        # Attempt to detect the start of the diff. Borrowed from:
                        # http://selenic.com/hg/file/79e5de2bfa8c/mercurial/patch.py#l163
                        diffre = re.compile(
                            r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                            r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                            r'---[ \t].*?^\+\+\+[ \t]|'
                            r'\*\*\*[ \t].*?^---[ \t])',
                            re.MULTILINE | re.DOTALL)
                        m = diffre.search(self.data)
                        if m:
                            # Remove the patch header, since we'll be re-adding a cleaned up version later.
                            self.data = self.data[m.start(0):]
                    except UnicodeDecodeError:
                        # Ftr, this could be due to the (included) message part: see later message block.
                        bug.settings.ui.warn(
                            "Patch id=%s desc=\"%s\" diff data were discarded:\n"
                            % (self.id, self.desc))
                        # Print the exception without its traceback.
                        sys.excepthook(sys.exc_info()[0],
                                       sys.exc_info()[1], None)
                        # Can't do better than discard data:
                        # trying |.decode('utf-8', 'replace')| as a fallback would be too risky
                        #   if user imports the result then forgets to fix it.
                        self.data = ''
                else:
                    self.data = ''
        else:
            data = patch.extract(bug.settings.ui, StringIO.StringIO(rawtext))

            filename = data.get('filename')
            message = data.get('message')
            user = data.get('user')
            date = data.get('date')
            branch = data.get('branch')
            nodeid = data.get('node')
            p1 = data.get('p1')
            p2 = data.get('p2')

            # for some reason, patch.extract writes a temporary file with the diff hunks
            if filename:
                fp = file(filename)
                try:
                    # BugZilla is not explicit about patch encoding. We need to check it's utf-8.
                    # utf-8: convert from 8-bit encoding to internal (16/32-bit) Unicode.
                    self.data = fp.read().decode('utf-8')
                    # Attempt to detect the start of the diff. Borrowed from:
                    # http://selenic.com/hg/file/79e5de2bfa8c/mercurial/patch.py#l163
                    diffre = re.compile(
                        r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE | re.DOTALL)
                    m = diffre.search(self.data)
                    if m:
                        # Remove the patch header, since we'll be re-adding a cleaned up version later.
                        self.data = self.data[m.start(0):]
                except UnicodeDecodeError:
                    # Ftr, this could be due to the (included) message part: see later message block.
                    bug.settings.ui.warn(
                        "Patch id=%s desc=\"%s\" diff data were discarded:\n" %
                        (self.id, self.desc))
                    # Print the exception without its traceback.
                    sys.excepthook(sys.exc_info()[0], sys.exc_info()[1], None)
                    # Can't do better than discard data:
                    # trying |.decode('utf-8', 'replace')| as a fallback would be too risky
                    #   if user imports the result then forgets to fix it.
                    self.data = ''
                fp.close()
                os.remove(filename)
            else:
                self.data = ''

        # Remove seconds (which are always ':00') and timezone from the patch date:
        # keep 'yyyy-mm-dd hh:mn' only.
        self.date = date or node.find('date').text[:16]

        if user:
            try:
                # See previous self.data block about utf-8 handling.
                self.author = user.decode('utf-8')
            except UnicodeDecodeError:
                bug.settings.ui.warn(
                    "Patch id=%s desc=\"%s\" user data were discarded:\n" %
                    (self.id, self.desc))
                sys.excepthook(sys.exc_info()[0], sys.exc_info()[1], None)
                user = None
        if not user:
            # Bugzilla v3.4.1+: "Email Addresses Hidden From Logged-Out Users"
            patchAttacherEmail = node.find('attacher').text
            # 'patchAttacherEmail' may not be enough, compare date too to be as precise as possible...
            posts = [
                p for p in self.bug.comments
                if p.date == self.date and p.who_email == patchAttacherEmail
            ]
            who = posts[0].who
            for p in posts:
                if p.who == who:
                    continue
                print "Warning: could not figure out exact author (multiple names for same date and email address)!"
                who = ""
                break
            # Email domain may need to be retrieved/added manually...
            self.author = "%s <%s>" % (
                # Scrub the :cruft and any '[...]' or '(...)' too from the username.
                re.sub("\[.*?\]|\(.*?\)|:\S+", "", who).strip(),
                patchAttacherEmail)

        self.commit_message = None
        # (Mercurial v1.4.3(-!?)) "No message" is extracted as '\n' :-/
        # Want to strip the message anyway.
        if message:
            try:
                # See previous self.data block about utf-8 handling.
                self.commit_message = message.decode('utf-8').strip()
            except UnicodeDecodeError:
                bug.settings.ui.warn(
                    "Patch id=%s desc=\"%s\" message data were discarded too:\n"
                    % (self.id, self.desc))
                sys.excepthook(sys.exc_info()[0], sys.exc_info()[1], None)
                message = None
        if not self.commit_message:
            self.commit_message = self.bug.settings.msg_format % self.metadata
Code example #16
def headdivergencewebcommand(web):
    """Get information about divergence between this repo and a changeset.

    This API was invented to be used by MozReview to obtain information about
    how a repository/head has progressed/diverged since a commit was submitted
    for review.

    It is assumed that this is running on the canonical/mainline repository.
    Changes in other repositories must be rebased onto or merged into
    this repository.
    """
    req = web.req

    if b'node' not in req.qsparams:
        # TRACKING hg48
        if util.versiontuple(n=2) >= (4, 8):
            return web.sendtemplate(b'error',
                                    error=b"missing parameter 'node'")
        else:
            return web.sendtemplate(
                b'error', error={b'error': b"missing parameter 'node'"})

    repo = web.repo

    paths = set(req.qsparams.getall(b'p'))
    basectx = scmutil.revsymbol(repo, req.qsparams[b'node'])

    # Find how much this repo has changed since the requested changeset.
    # Our heuristic is to find the descendant head with the highest revision
    # number. Most (all?) repositories we care about for this API should have
    # a single head per branch. And we assume the newest descendant head is
    # the one we care about the most. We don't care about branches because
    # if a descendant is on different branch, then the repo has likely
    # transitioned to said branch.
    #
    # If we ever consolidate Firefox repositories, we'll need to reconsider
    # this logic, especially if release repos with their extra branches/heads
    # are involved.

    # Specifying "start" only gives heads that are descendants of "start."
    headnodes = repo.changelog.heads(start=basectx.node())

    headrev = max(repo[n].rev() for n in headnodes)
    headnode = repo[headrev].node()

    betweennodes, outroots, outheads = \
        repo.changelog.nodesbetween([basectx.node()], [headnode])

    # nodesbetween returns base node. So prune.
    betweennodes = betweennodes[1:]

    commitsbehind = len(betweennodes)

    # If rev 0 or a really old revision is passed in, we could DoS the server
    # by having to iterate nearly all changesets. Establish a cap for number
    # of changesets to examine.
    maxnodes = repo.ui.configint(b'hgmo', b'headdivergencemaxnodes', 1000)
    filemergesignored = False
    if len(betweennodes) > maxnodes:
        betweennodes = []
        filemergesignored = True

    filemerges = {}
    for node in betweennodes:
        ctx = repo[node]

        files = set(ctx.files())
        for p in files & paths:
            filemerges.setdefault(p, []).append(ctx.hex())

    return web.sendtemplate(b'headdivergence',
                            commitsbehind=commitsbehind,
                            filemerges=filemerges,
                            filemergesignored=filemergesignored)
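Nearly all of these examples gate behavior on `util.versiontuple`, which is a real Mercurial API; the version shown below is an assumed sample value:

from mercurial import util

# For a hypothetical Mercurial 4.8.1:
#   util.versiontuple()    -> (4, 8, 1, None)
#   util.versiontuple(n=2) -> (4, 8)
#   util.versiontuple(n=3) -> (4, 8, 1)
# which is what comparisons like `util.versiontuple(n=2) >= (4, 8)` rely on.
print(util.versiontuple(n=2))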
Code example #17
def robustcheckout(ui, url, dest, upstream=None, revision=None, branch=None,
                   purge=False, sharebase=None, networkattempts=None,
                   sparseprofile=None):
    """Ensure a working copy has the specified revision checked out.

    Repository data is automatically pooled into the common directory
    specified by ``--sharebase``, which is a required argument. It is required
    because pooling storage prevents excessive cloning, which makes operations
    complete faster.

    One of ``--revision`` or ``--branch`` must be specified. ``--revision``
    is preferred, as it is deterministic and there is no ambiguity as to which
    revision will actually be checked out.

    If ``--upstream`` is used, the repo at that URL is used to perform the
    initial clone instead of cloning from the repo where the desired revision
    is located.

    ``--purge`` controls whether to remove untracked and ignored files from
    the working directory. If used, the end state of the working directory
    should only contain files explicitly under version control for the requested
    revision.

    ``--sparseprofile`` can be used to specify a sparse checkout profile to use.
    The sparse checkout profile corresponds to a file in the revision to be
    checked out. If a previous sparse profile or config is present, it will be
    replaced by this sparse profile. We choose not to "widen" the sparse config
    so operations are as deterministic as possible. If an existing checkout
    is present and it isn't using a sparse checkout, we error. This is to
    prevent accidentally enabling sparse on a repository that may have
    clients that aren't sparse aware. Sparse checkout support requires Mercurial
    4.3 or newer and the ``sparse`` extension must be enabled.
    """
    if not revision and not branch:
        raise error.Abort('must specify one of --revision or --branch')

    if revision and branch:
        raise error.Abort('cannot specify both --revision and --branch')

    # Require revision to look like a SHA-1.
    if revision:
        if len(revision) < 12 or len(revision) > 40 or not re.match('^[a-f0-9]+$', revision):
            raise error.Abort('--revision must be a SHA-1 fragment 12-40 '
                              'characters long')

    sharebase = sharebase or ui.config('share', 'pool')
    if not sharebase:
        raise error.Abort('share base directory not defined; refusing to operate',
                          hint='define share.pool config option or pass --sharebase')

    # Sparse profile support was added in Mercurial 4.3, where it was highly
    # experimental. Because of its fragility, we only support sparse profiles
    # on 4.3 through 4.5; newer releases need to be opted in to explicitly.
    # We /could/ silently fall back to non-sparse when not supported.
    # However, given that sparse has performance implications, we want to fail
    # fast if we can't satisfy the desired checkout request.
    if sparseprofile:
        if util.versiontuple(n=2) not in ((4, 3), (4, 4), (4, 5)):
            raise error.Abort('sparse profile support only available for '
                              'Mercurial 4.3 through 4.5 (using %s)' % util.version())

        try:
            extensions.find('sparse')
        except KeyError:
            raise error.Abort('sparse extension must be enabled to use '
                              '--sparseprofile')

    ui.warn('(using Mercurial %s)\n' % util.version())

    # worker.backgroundclose only makes things faster if running anti-virus,
    # which our automation doesn't. Disable it.
    ui.setconfig('worker', 'backgroundclose', False)

    # By default the progress bar starts after 3s and updates every 0.1s. We
    # change this so it shows and updates every 1.0s.
    # We also tell progress to assume a TTY is present so updates are printed
    # even if there is no known TTY.
    # We make the config change here instead of in a config file because
    # otherwise we're at the whim of whatever configs are used in automation.
    ui.setconfig('progress', 'delay', 1.0)
    ui.setconfig('progress', 'refresh', 1.0)
    ui.setconfig('progress', 'assume-tty', True)

    sharebase = os.path.realpath(sharebase)

    return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                       sharebase, networkattempts,
                       sparse_profile=sparseprofile)
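A hypothetical invocation matching the docstring above (URL, paths, and revision hash are all placeholder values):

# hg robustcheckout https://hg.example.com/repo /builds/checkout \
#     --sharebase /builds/hg-shared \
#     --revision 0123456789ab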
Code example #18
def _checkwip(ui, cw):
    havewip_alias = ui.hasconfig('alias', 'wip')
    havewip_revset = ui.hasconfig('revsetalias', 'wip')

    hg_version = util.versiontuple(n=2)

    # If the user has the `wip` revset alias, is on hg46+, and still has the
    # old alias (i.e. with the `unstable` expression instead of `orphan`),
    # we upgrade it with a notice.
    if havewip_revset and hg_version >= (4, 6) and 'unstable' in ui.config(
            'revsetalias', 'wip'):
        ui.write(WIP_UPDATED_EXPRESSION)
    elif not havewip_alias and uipromptchoice(ui, WIP_INFO):
        return

    # The wip configuration changes over time. Ensure it is up to date.
    cw.c.setdefault('alias', {})
    cw.c.setdefault('revsetalias', {})
    cw.c.setdefault('templates', {})

    cw.c['alias']['wip'] = 'log --graph --rev=wip --template=wip'

    if hg_version < (4, 6):
        unstable = 'unstable'
    else:
        unstable = 'orphan'

    wiprevset = ('('
                 'parents(not public()) '
                 'or not public() '
                 'or . '
                 'or (head() and branch(default))'
                 ') and (not obsolete() or %s()^) '
                 'and not closed()') % unstable

    if ui.hasconfig('extensions', 'firefoxtree') or 'firefoxtree' in cw.c.get(
            'extensions', {}):
        wiprevset += ' and not (fxheads() - date(-90))'

    cw.c['revsetalias']['wip'] = wiprevset

    cw.c['templates']['wip'] = (
        "'"
        # branches
        '{label("wip.branch", if(branches,"{branches} "))}'
        # revision and node
        '{label(ifeq(graphnode,"x","wip.obsolete","wip.{phase}"),"{rev}:{node|short}")}'
        # just the username part of the author, for brevity
        '{label("wip.user", " {author|user}")}'
        # tags
        '{label("wip.tags", if(tags," {tags}"))}'
        '{label("wip.tags", if(fxheads," {fxheads}"))}'
        # bookmarks (taking care to not underline the separator)
        '{if(bookmarks," ")}'
        '{label("wip.bookmarks", if(bookmarks,bookmarks))}'
        # first line of commit message
        '{label(ifcontains(rev, revset("parents()"), "wip.here"), " {desc|firstline}")}'
        "'")

    # Set the colors for the parts of the WIP output.
    _set_color(cw, 'wip.bookmarks', 'yellow underline')
    _set_color(cw, 'wip.branch', 'yellow')
    _set_color(cw, 'wip.draft', 'green')
    _set_color(cw, 'wip.here', 'red')
    _set_color(cw, 'wip.obsolete', 'none')
    _set_color(cw, 'wip.public', 'blue')
    _set_color(cw, 'wip.tags', 'yellow')
    _set_color(cw, 'wip.user', 'magenta')

    # Enabling graphshorten greatly improves the graph output.
    if 'experimental' not in cw.c:
        cw.c['experimental'] = {}
    cw.c['experimental']['graphshorten'] = 'true'

    # wip is paged automatically if pager is built-in... unless the pager
    # extension is enabled. So we set ``pager.attend-wip`` iff the pager
    # extension is present.
    if 'pager' in cw.c.get('extensions', {}):
        cw.c.setdefault('pager', {})
        cw.c['pager']['attend-wip'] = 'true'
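For orientation, a rough sketch (abridged; the exact revset and template depend on the detected Mercurial version, as the code above shows) of the configuration this step writes:

# [alias]
# wip = log --graph --rev=wip --template=wip
#
# [revsetalias]
# wip = (parents(not public()) or not public() or .
#        or (head() and branch(default)))
#       and (not obsolete() or orphan()^) and not closed()
#
# [experimental]
# graphshorten = true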
Code example #19
def configwizard(ui, repo, statedir=None, **opts):
    """Ensure your Mercurial configuration is up to date."""
    runsteps = set(wizardsteps)

    # Mercurial <1.7 had a bug where monkeypatching ui.__class__
    # during uisetup() doesn't work. So we do our own ui.hasconfig()
    # here. Other uses of ui.hasconfig() are allowed, as they will
    # have a properly monkeypatched ui.__class__.
    if 'steps' in ui._data(False)._data.get('configwizard', {}):
        runsteps = set(ui.configlist('configwizard', 'steps'))

    hgversion = util.versiontuple(n=3)

    # The point release version can be None for e.g. X.Y versions. Normalize
    # to make tuple compares work.
    if hgversion[2] is None:
        hgversion = (hgversion[0], hgversion[1], 0)

    if hgversion < MINIMUM_SUPPORTED_VERSION:
        ui.warn(VERSION_TOO_OLD % (
            hgversion[0],
            hgversion[1],
            MINIMUM_SUPPORTED_VERSION[0],
            MINIMUM_SUPPORTED_VERSION[1],
        ))
        raise error.Abort('upgrade Mercurial then run again')

    uiprompt(ui, INITIAL_MESSAGE, default='<RETURN>')

    with demandimport.deactivated():
        # Mercurial 4.2 moved userrcpath() from scmutil to rcutil.
        try:
            from mercurial.rcutil import userrcpath
        except ImportError:
            from mercurial.scmutil import userrcpath

    configpaths = [p for p in userrcpath() if os.path.exists(p)]
    path = configpaths[0] if configpaths else userrcpath()[0]
    cw = configobjwrapper(path)

    if 'hgversion' in runsteps:
        if _checkhgversion(ui, hgversion):
            return 1

    if 'username' in runsteps:
        _checkusername(ui, cw)

    if 'tweakdefaults' in runsteps:
        _checktweakdefaults(ui, cw)

    if 'diff' in runsteps:
        _checkdiffsettings(ui, cw)

    if 'color' in runsteps:
        _checkcolor(ui, cw, hgversion)

    if 'pager' in runsteps:
        _checkpager(ui, cw, hgversion)

    if 'curses' in runsteps:
        _checkcurses(ui, cw)

    if 'historyediting' in runsteps:
        _checkhistoryediting(ui, cw, hgversion)

    if 'evolve' in runsteps:
        _checkevolve(ui, cw, hgversion)

    if 'fsmonitor' in runsteps:
        _checkfsmonitor(ui, cw, hgversion)

    if 'blackbox' in runsteps:
        _promptnativeextension(
            ui, cw, 'blackbox',
            'Enable logging of commands to help diagnose bugs '
            'and performance problems')

    if 'security' in runsteps:
        _checksecurity(ui, cw, hgversion)

    if 'firefoxtree' in runsteps:
        _promptvctextension(ui, cw, 'firefoxtree', FIREFOXTREE_INFO)

    if 'clang-format' in runsteps:
        _promptvctextension(ui, cw, 'clang-format', CLANG_FORMAT_INFO)

    if 'js-format' in runsteps:
        _promptvctextension(ui, cw, 'js-format', JS_FORMAT_INFO)

    if 'format-source' in runsteps:
        _checkformatsource(ui, cw)

    if 'wip' in runsteps:
        _checkwip(ui, cw)

    if 'smartannotate' in runsteps:
        _checksmartannotate(ui, cw)

    if 'codereview' in runsteps:
        _checkcodereview(ui, cw)

    if 'pushtotry' in runsteps:
        _promptvctextension(ui, cw, 'push-to-try', PUSHTOTRY_INFO)

    if 'multiplevct' in runsteps:
        _checkmultiplevct(ui, cw)

    if 'configchange' in runsteps:
        _handleconfigchange(ui, cw)

    if 'permissions' in runsteps:
        _checkpermissions(ui, cw)

    return 0
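A hypothetical way to restrict the wizard to a subset of steps, via the `configwizard.steps` list the function reads:

# [configwizard]
# steps = username, diff, color, wip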
Code example #20
File: genosxversion.py  Project: Smosker/mercurial
def paranoidver(ver):
    """Given an hg version produce something that distutils can sort.

    Some Mac package management systems use distutils code in order to
    figure out upgrades, which makes life difficult. The test case is
    a reduced version of code in the Munki tool used by some large
    organizations to centrally manage OS X packages, which is what
    inspired this kludge.

    >>> paranoidver('3.4')
    '3.4.0'
    >>> paranoidver('3.4.2')
    '3.4.2'
    >>> paranoidver('3.0-rc+10')
    '2.9.9999-rc+10'
    >>> paranoidver('4.2+483-5d44d7d4076e')
    '4.2.0+483-5d44d7d4076e'
    >>> paranoidver('4.2.1+598-48d1e1214d8c')
    '4.2.1+598-48d1e1214d8c'
    >>> paranoidver('4.3-rc')
    '4.2.9999-rc'
    >>> paranoidver('4.3')
    '4.3.0'
    >>> from distutils import version
    >>> class LossyPaddedVersion(version.LooseVersion):
    ...     '''Subclass version.LooseVersion to compare things like
    ...     "10.6" and "10.6.0" as equal'''
    ...     def __init__(self, s):
    ...             self.parse(s)
    ...
    ...     def _pad(self, version_list, max_length):
    ...         'Pad a version list by adding extra 0 components to the end'
    ...         # copy the version_list so we don't modify it
    ...         cmp_list = list(version_list)
    ...         while len(cmp_list) < max_length:
    ...             cmp_list.append(0)
    ...         return cmp_list
    ...
    ...     def __cmp__(self, other):
    ...         if isinstance(other, str):
    ...             other = LossyPaddedVersion(other)
    ...         max_length = max(len(self.version), len(other.version))
    ...         self_cmp_version = self._pad(self.version, max_length)
    ...         other_cmp_version = self._pad(other.version, max_length)
    ...         return cmp(self_cmp_version, other_cmp_version)
    >>> def testver(older, newer):
    ...   o = LossyPaddedVersion(paranoidver(older))
    ...   n = LossyPaddedVersion(paranoidver(newer))
    ...   return o < n
    >>> testver('3.4', '3.5')
    True
    >>> testver('3.4.0', '3.5-rc')
    True
    >>> testver('3.4-rc', '3.5')
    True
    >>> testver('3.4-rc+10-deadbeef', '3.5')
    True
    >>> testver('3.4.2', '3.5-rc')
    True
    >>> testver('3.4.2', '3.5-rc+10-deadbeef')
    True
    >>> testver('4.2+483-5d44d7d4076e', '4.2.1+598-48d1e1214d8c')
    True
    >>> testver('4.3-rc', '4.3')
    True
    >>> testver('4.3', '4.3-rc')
    False
    """
    major, minor, micro, extra = util.versiontuple(ver, n=4)
    if micro is None:
        micro = 0
    if extra:
        if extra.startswith('rc'):
            if minor == 0:
                major -= 1
                minor = 9
            else:
                minor -= 1
            micro = 9999
            extra = '-' + extra
        else:
            extra = '+' + extra
    else:
        extra = ''
    return '%d.%d.%d%s' % (major, minor, micro, extra)
Code example #21
def cmdutiladd(ui, repo, storage_matcher):
    if util.versiontuple(n=2) >= (5, 0):
        uipathfn = scmutil.getuipathfn(repo, forcerelativevalue=True)
        cmdutil.add(ui, repo, storage_matcher, "", uipathfn, True)
    else:
        cmdutil.add(ui, repo, storage_matcher, "", True)
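A hypothetical call site (names assumed from the surrounding examples), staging a newly created tracking file on both old and new Mercurial:

# wctx = repo[None]
# matcher = scmutil.match(wctx, ['path:.hg-format-source'])
# cmdutiladd(ui, repo, matcher)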
Code example #22
def cmd_format_source(ui, repo, tool, *pats, **opts):
    """register a tool to format source files during merges and rebases

    Record a mapping from the given file pattern FILES to a source formatting
    tool TOOL. Mappings are stored in the version-controlled file
    (automatically committed when format-source is used) .hg-format-source in
    the root of the checkout. The mapping causes TOOL to be run on FILES during
    future merge and rebase operations.

    The actual command run for TOOL needs to be registered in the config. See
    :hg:`help -e format-source` for details.

    """
    if repo.getcwd():
        msg = _("format-source must be run from repository root")
        hint = _("cd %s") % repo.root
        raise error.Abort(msg, hint=hint)

    if not pats:
        raise error.Abort(_('no files specified'))

    # XXX We only support 'glob' patterns for now; the recursive behavior
    # of the other pattern types is a bit wonky.
    for pattern in pats:
        if not pattern.startswith('glob:'):
            msg = _("format-source only supports explicit 'glob' patterns "
                    "for now ('%s')")
            msg %= pattern
            hint = _('maybe try with "glob:%s"') % pattern
            raise error.Abort(msg, hint=hint)

    # lock the repo to make sure no content is changed
    with repo.wlock():
        # formatting tool
        if ' ' in tool:
            raise error.Abort(_("tool name cannot contain space: '%s'") % tool)

        # If the tool was not specified in the config, we may be able to fall
        # back to the Mozilla Firefox in-tree clang-format tool.
        if should_use_default(repo, tool):
            shell_tool, tool_config_files, file_ext = return_default_clang_format(
                repo)
        else:
            shell_tool = repo.ui.config('format-source', tool)
            tool_config_files = repo.ui.configlist('format-source',
                                                   '%s:configpaths' % tool)
            file_ext = tuple(
                repo.ui.configlist('format-source', '%s:fileext' % tool))

        if not shell_tool:
            msg = _("unknown format tool: %s (no 'format-source.%s' config)")
            raise error.Abort(msg % (tool, tool))
        if not file_ext:
            msg = _("no %s:fileext present") % tool
            raise error.Abort(msg)
        cmdutil.bailifchanged(repo)
        cmdutil.checkunfinished(repo, commit=True)
        wctx = repo[None]
        # files to be formatted
        matcher = scmutil.match(wctx, pats, opts)
        files = list(wctx.matches(matcher))

        if util.versiontuple(n=2) >= (4, 7):
            # In 4.7 we have ui.makeprogress
            with ui.makeprogress(_('formatting'),
                                 unit=_('files'),
                                 total=len(files)) as progress:
                proc = worker.worker(ui, 0.1, batchformat,
                                     (repo, wctx, tool, shell_tool, file_ext),
                                     files)
                for filepath in proc:
                    progress.increment(item=filepath)
        else:
            proc = worker.worker(ui, 0.1, batchformat,
                                 (repo, wctx, tool, shell_tool, file_ext),
                                 files)
            # Wait for everything to finish
            for filepath in proc:
                pass

        # update the storage to mark formatted file as formatted
        with repo.wvfs(file_storage_path, mode='ab') as storage:
            for pattern in pats:
                # XXX if pattern was relative, we need to reroot it from the
                # repository root. For now we constrain the command to run
                # at the root of the repository.
                data = {
                    'tool': encoding.unifromlocal(tool),
                    'pattern': encoding.unifromlocal(pattern)
                }
                if tool_config_files:
                    data['configpaths'] = [
                        encoding.unifromlocal(path)
                        for path in tool_config_files
                    ]
                entry = json.dumps(data, sort_keys=True)
                assert '\n' not in entry
                storage.write('%s\n' % entry)

        if file_storage_path not in wctx:
            storage_matcher = scmutil.match(wctx,
                                            ['path:' + file_storage_path])
            cmdutil.add(ui, repo, storage_matcher, '', True)

        # commit the whole
        with repo.lock():
            commit_patterns = ['path:' + file_storage_path]
            commit_patterns.extend(pats)
            return commands._docommit(ui, repo, *commit_patterns, **opts)
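A hypothetical line appended to .hg-format-source by the command above (values are illustrative): json.dumps(..., sort_keys=True) makes the key order stable, and the assert guarantees one JSON object per line.

# {"configpaths": [".clang-format"], "pattern": "glob:**.cpp", "tool": "clang-format"}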
Code example #23
def supported_hg():
    '''Returns True if the Mercurial version is supported for robustcheckout'''
    return b'.'.join(pycompat.bytestr(v)
                     for v in util.versiontuple(n=2)) in testedwith.split()
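Illustrative only (the testedwith value is an assumption, not taken from the source):

# testedwith = b'4.9 5.0 5.1'
# util.versiontuple(n=2) == (5, 0)
# b'.'.join(pycompat.bytestr(v) for v in (5, 0)) -> b'5.0'
# b'5.0' in testedwith.split() -> True, so supported_hg() returns True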
Code example #24
def automationrelevancewebcommand(web):
    req = web.req

    if b'node' not in req.qsparams:
        # TRACKING hg48
        if util.versiontuple(n=2) >= (4, 8):
            return web.sendtemplate(b'error',
                                    error=b"missing parameter 'node'")
        else:
            return web.sendtemplate(
                b'error', error={b'error': b"missing parameter 'node'"})

    repo = web.repo
    deletefields = {
        b'bookmarks',
        b'branch',
        b'branches',
        b'changelogtag',
        b'child',
        b'ctx',
        b'inbranch',
        b'instabilities',
        b'obsolete',
        b'parent',
        b'succsandmarkers',
        b'tags',
        b'whyunstable',
    }

    csets = []
    # Query an unfiltered repo because sometimes automation wants to run against
    # changesets that have since become hidden. The response exposes whether the
    # requested node is visible, so consumers can make intelligent decisions
    # about what to do if the changeset isn't visible.
    urepo = repo.unfiltered()

    revs = list(urepo.revs(b'automationrelevant(%r)', req.qsparams[b'node']))

    # The pushlog extension wraps webutil.commonentry, and the way it is called
    # means pushlog opens a SQLite connection on every call. This is inefficient,
    # so we preload and cache data for the pushlog entries we care about.
    cl = urepo.changelog
    nodes = [cl.node(rev) for rev in revs]

    with repo.unfiltered().pushlog.cache_data_for_nodes(nodes):
        for rev in revs:
            ctx = urepo[rev]
            entry = webutil.changelistentry(web, ctx)

            if req.qsparams.get(b'backouts'):
                backout_node = get_backoutbynode(b'hgmo', repo, ctx)
                if backout_node is not None:
                    entry[b'backedoutby'] = backout_node

            # The pushnodes list is redundant with data from other changesets.
            # The amount of redundant data for pushes containing N>100
            # changesets can add up to megabytes in size.
            try:
                del entry[b'pushnodes']
            except KeyError:
                pass

            # Some items in changelistentry are generators, which json.dumps()
            # can't handle. So we expand them.
            entrycopy = copy.copy(entry)
            for k, v in entrycopy.items():
                # "files" is a generator that attempts to call a template.
                # Don't even bother and just repopulate it.
                if k == b'files':
                    entry[b'files'] = sorted(ctx.files())
                elif k == b'allparents':
                    # TRACKING hg48
                    # generic template keyword args needed (context, mapping)
                    # they are not actually used, so `None, None` is sufficient
                    if util.versiontuple(n=2) >= (4, 8):
                        iterator = v(None, None).itermaps(ctx)
                    else:
                        iterator = v().itermaps(ctx)

                    entry[b'parents'] = [p[b'node'] for p in iterator]
                    del entry[b'allparents']
                # These aren't interesting to us, so prune them. The
                # original impetus for this was that "changelogtag"
                # isn't part of the json template and adding it is non-trivial.
                elif k in deletefields:
                    del entry[k]
                elif isinstance(v, types.GeneratorType):
                    entry[k] = list(v)

            csets.append(entry)

    # Advertise whether the requested revision is visible (non-obsolete).
    if csets:
        visible = csets[-1][b'node'] in repo
    else:
        visible = None

    data = {
        b'changesets': templateutil.mappinglist(csets),
        b'visible': visible,
    }

    return web.sendtemplate(b'automationrelevance', **pycompat.strkwargs(data))
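A standalone sketch of the generator-expansion concern handled above: json.dumps() cannot serialize generators, so they must be expanded first (names and values here are made up for illustration).

import json
import types

def expand(value):
    # json.dumps() chokes on generators; expand them to lists first.
    return list(value) if isinstance(value, types.GeneratorType) else value

entry = {'files': (f for f in ('a.txt', 'b.txt'))}
print(json.dumps({k: expand(v) for k, v in entry.items()}))
# -> {"files": ["a.txt", "b.txt"]}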
Code example #25
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing " b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse " b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
            ui.warn(
                b"(shared store does not belong to pooled storage; "
                b"deleting destination to improve efficiency)\n"
            )
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(
            b"(dest has an active working directory lock; assuming it is "
            b"left over from a previous process and that the destination "
            b"is corrupt; deleting it just to be sure)\n"
        )
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(
                b"reached maximum number of network attempts; " b"giving up\n"
            )

        ui.warn(
            b"(retrying after network failure on attempt %d of %d)\n"
            % (networkattempts[0], networkattemptlimit)
        )

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(_(b"unexpected response from remote server:")):
                ui.warn(b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(
                    b"unhandled URLError; reason type: %s; value: %s\n"
                    % (e.reason.__class__.__name__, e.reason)
                )
        else:
            ui.warn(
                b"unhandled exception during network operation; type: %s; "
                b"value: %s\n" % (e.__class__.__name__, e)
            )

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone " b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(
            b"(shared store has an active lock; assuming it is left "
            b"over from a previous process and that the store is "
            b"corrupt; deleting store and destination just to be "
            b"sure)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(
            b"(shared store missing requires file; this is a really "
            b"odd failure; deleting store and destination)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(
                b"(shared store missing requirements: %s; deleting "
                b"store and destination to ensure optimal behavior)\n"
                % b", ".join(sorted(missing))
            )
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires", "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remote-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(
                    b"(remote resolved %s to %s; "
                    b"result is not deterministic)\n" % (branch, checkoutrevision)
                )

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purgeext = extensions.find(b"purge")

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, ""
                    )

            with timeit("purge", "purge"):
                if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{"print": None, "print0": None, "dirs": None, "files": None}
                ):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(
                b"sparse profile %s does not exist at revision "
                b"%s" % (sparse_profile, checkoutrevision)
            )

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(
                repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
            )
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
            ui.write(
                b"(sparse profile %s already set; no need to update "
                b"sparse config)\n" % sparse_profile
            )
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(
                    b"(replacing existing sparse config with profile "
                    b"%s)\n" % sparse_profile
                )
            else:
                ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(
                            repo, [], [], [sparse_profile], force=True
                        ),
                    )
                )

                repo.ui.status(
                    b"%d files added, %d files dropped, "
                    b"%d files conflicting\n" % tuple(fcounts)
                )

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
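A standalone check of the backoff schedule documented in handlenetworkfailure() above, assuming the default jitter bounds of 1000-5000 ms:

for attempt in (1, 2, 3):
    base = (2 ** attempt - 1) * 1.5
    # jitter adds between 1.0 and 5.0 seconds
    print(attempt, base + 1.0, base + 5.0)
# -> 1 2.5 6.5 / 2 5.5 9.5 / 3 11.5 15.5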
Code example #26
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
                networkattemptlimit, networkattempts=None, sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot enable sparse profile on existing '
                          'non-sparse checkout',
                          hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot use non-sparse checkout on existing sparse '
                          'checkout',
                          hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s\n' % (
                    e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s\n' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            res = hg.clone(ui, {}, clonepeer, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')

            if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                              # The function expects all arguments to be
                              # defined.
                              **{'print': None, 'print0': None, 'dirs': None,
                                 'files': None}):
                raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(len, sparsemod._updateconfigandrefreshwdir(
                    repo, [], [], [sparse_profile], force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None