Example #1
File: journal.py Project: motlin/cyg
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
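How this wrapper gets installed is outside the excerpt. Its (orig, store, fp) signature matches bmstore._write(self, fp), so a minimal registration sketch, assuming that wrap point, looks like:

from mercurial import bookmarks, extensions

def extsetup(ui):
    # Assumption: the wrapper targets bmstore._write, whose (self, fp)
    # signature matches the (orig, store, fp) wrapper above.
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)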
Example #2
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in pycompat.iteritems(store):
            oldvalue = oldmarks.get(mark, repo.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
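The only changes from Example #1 are the Python 3 ports: pycompat.iteritems(store) replaces store.iteritems(), and repo.nullid replaces node.nullid. A minimal sketch of what such an iteritems shim does (illustrative; not the actual mercurial.pycompat source):

import sys

if sys.version_info[0] >= 3:
    def iteritems(d):
        # dict.iteritems() no longer exists on Python 3; items() is a view.
        return iter(d.items())
else:
    def iteritems(d):
        return d.iteritems()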
Example #3
def _masterrevset(ui, repo, masterstring):
    """
    Try to find the name of ``master`` -- usually a bookmark.

    Defaults to the last public revision, if no suitable local or remote
    bookmark is found.
    """

    if not masterstring:
        masterstring = ui.config('smartlog', 'master')

    if masterstring:
        return masterstring

    names = set(bookmarks.bmstore(repo).keys())
    if util.safehasattr(repo, 'names') and 'remotebookmarks' in repo.names:
        names.update(set(repo.names['remotebookmarks'].listnames(repo)))

    for name in _reposnames(ui):
        if name in names:
            return name

    return 'last(public())'
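Note that _masterrevset returns a revset expression string (e.g. 'last(public())'), not a revision. Examples #9 and #10 feed that string to a companion helper, _masterrev, which is not shown in these excerpts; a hedged sketch of such a helper, assuming resolution via repo.revs():

from mercurial import error

def _masterrev(repo, masterrevset):
    # Sketch only: resolve the revset expression to a single revision
    # number, or None when it matches nothing or cannot be looked up.
    try:
        return repo.revs(masterrevset).last()
    except error.RepoLookupError:
        return None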
Example #5
def wrappedpullobsolete(orig, pullop):
    res = orig(pullop)

    repo = pullop.repo
    remote = pullop.remote

    if not isfirefoxrepo(repo):
        return res

    if remote.capable('firefoxtrees'):
        bmstore = bookmarks.bmstore(repo)
        # remote.local() returns a localrepository or None. If local,
        # just pass it into the wire protocol command/function to simulate
        # the remote command call.
        if remote.local():
            lines = firefoxtrees(remote.local(), None).splitlines()
        else:
            lines = remote._call('firefoxtrees').splitlines()
        oldtags = {}
        for tag, node, tree, uri in get_firefoxtrees(repo):
            oldtags[tag] = node
        newtags = {}
        changes = []
        for line in lines:
            tag, node = line.split()
            newtags[tag] = node

            node = bin(node)

            # A local bookmark of the incoming tag name is already set.
            # Wipe it out - the server takes precedence.
            if tag in bmstore:
                oldtags[tag] = bmstore[tag]
                repo.ui.status('(removing bookmark on %s matching firefoxtree %s)\n' %
                               (short(bmstore[tag]), tag))

                changes.append((tag, None))

                if bmstore.active == tag:
                    repo.ui.status('(deactivating bookmark %s)\n' % tag)
                    bookmarks.deactivate(repo)

            if oldtags.get(tag, None) == node:
                continue

            repo.firefoxtrees[tag] = node

            between = None
            if tag in oldtags:
                between = len(repo.revs('%n::%n', oldtags[tag], node)) - 1

                if not between:
                    continue

            msg = _('updated firefox tree tag %s') % tag
            if between:
                msg += _(' (+%d commits)') % between
            msg += '\n'
            repo.ui.status(msg)

        if changes:
            bmstore.applychanges(repo, pullop.gettransaction(), changes)

        writefirefoxtrees(repo)

    tree = resolve_uri_to_tree(remote.url())
    if tree:
        tree = tree.encode('utf-8')
        updateremoterefs(repo, remote, tree)

    return res
Example #6
def wrappedpullobsolete(orig, pullop):
    res = orig(pullop)

    repo = pullop.repo
    remote = pullop.remote

    if not isfirefoxrepo(repo):
        return res

    if remote.capable('firefoxtrees'):
        bmstore = bookmarks.bmstore(repo)
        # remote.local() returns a localrepository or None. If local,
        # just pass it into the wire protocol command/function to simulate
        # the remote command call.
        if remote.local():
            lines = firefoxtrees(remote.local(), None).splitlines()
        else:
            lines = remote._call('firefoxtrees').splitlines()
        oldtags = {}
        for tag, node, tree, uri in get_firefoxtrees(repo):
            oldtags[tag] = node
        newtags = {}
        for line in lines:
            tag, node = line.split()
            newtags[tag] = node

            node = bin(node)

            # A local bookmark of the incoming tag name is already set.
            # Wipe it out - the server takes precedence.
            if tag in bmstore:
                oldtags[tag] = bmstore[tag]
                repo.ui.status('(removing bookmark on %s matching firefoxtree %s)\n' %
                               (short(bmstore[tag]), tag))
                del bmstore[tag]
                bmstore.recordchange(pullop.trmanager.transaction())

                if bmstore.active == tag:
                    repo.ui.status('(deactivating bookmark %s)\n' % tag)
                    bookmarks.deactivate(repo)

            if oldtags.get(tag, None) == node:
                continue

            repo.firefoxtrees[tag] = node

            between = None
            if tag in oldtags:
                between = len(repo.revs('%n::%n', oldtags[tag], node)) - 1

                if not between:
                    continue

            msg = _('updated firefox tree tag %s') % tag
            if between:
                msg += _(' (+%d commits)') % between
            msg += '\n'
            repo.ui.status(msg)

        writefirefoxtrees(repo)

    tree = resolve_uri_to_tree(remote.url())
    if tree:
        tree = tree.encode('utf-8')
        updateremoterefs(repo, remote, tree)

    return res
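As with Example #1, the registration is outside the excerpt. The (orig, pullop) signature matches Mercurial's internal pull steps in the exchange module; a hedged sketch, assuming the wrapper targets exchange._pullobsolete (the convention its name suggests):

from mercurial import exchange, extensions

def extsetup(ui):
    # Assumption: wraps the obsolescence-marker pull step; any exchange
    # pull step taking a single pullop would fit this signature.
    extensions.wrapfunction(exchange, '_pullobsolete', wrappedpullobsolete)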
Example #7
def unifyrepo(ui, settings, **opts):
    """Unify the contents of multiple source repositories using settings.

    The settings file is a Mercurial config file (basically an INI file).
    """
    conf = unifyconfig(settings)

    # Ensure destrepo is created with generaldelta enabled.
    ui.setconfig('format', 'usegeneraldelta', True)
    ui.setconfig('format', 'generaldelta', True)

    # Verify all source repos have the same revision 0
    rev0s = set()
    for source in conf.sources:
        repo = hg.repository(ui, path=source['path'])

        # Verify
        node = repo[0].node()
        if rev0s and node not in rev0s:
            raise error.Abort('repository has different rev 0: %s\n' %
                              source['name'])

        # Verify pushlog exists
        pushlog = getattr(repo, 'pushlog', None)
        if not pushlog:
            raise error.Abort('pushlog API not available',
                              hint='is the pushlog extension loaded?')

        rev0s.add(node)

    # Ensure the staging repo has all changesets from the source repos.

    stageui = ui.copy()

    # Now collect all the changeset data with pushlog info.
    # node -> (when, source, rev, who, pushid)
    nodepushinfo = {}
    pushcount = 0
    allnodes = set()

    # Obtain pushlog data from each source repo. We obtain data for every node
    # and filter later because we want to be sure we have the earliest known
    # push data for a given node.
    for source in conf.sources:
        path = source['path']
        sourcerepo = hg.repository(ui, path=source['path'])
        pushlog = getattr(sourcerepo, 'pushlog', None)

        index = sourcerepo.changelog.index
        revnode = {}
        for rev in sourcerepo:
            # revlog.node() is too slow. Use the index directly.
            node = index[rev][7]
            revnode[rev] = node
            allnodes.add(node)

        noderev = {v: k for k, v in revnode.iteritems()}

        localpushcount = 0
        pushnodecount = 0
        for pushid, who, when, nodes in pushlog.pushes():
            pushcount += 1
            localpushcount += 1
            for node in nodes:
                pushnodecount += 1
                bnode = bin(node)

                # There is a race between us iterating the repo and querying the
                # pushlog. A new changeset could be written between when we
                # obtain nodes and encounter the pushlog. So ignore pushlog
                # for nodes we don't know about.
                if bnode not in noderev:
                    ui.warn('pushlog entry for unknown node: %s; '
                            'possible race condition?\n' % node)
                    continue

                rev = noderev[bnode]

                if bnode not in nodepushinfo:
                    nodepushinfo[bnode] = (when, path, rev, who, pushid)
                else:
                    currentwhen = nodepushinfo[bnode][0]
                    if when < currentwhen:
                        nodepushinfo[bnode] = (when, path, rev, who, pushid)

        ui.write(
            'obtained pushlog info for %d/%d revisions from %d pushes from %s\n'
            % (pushnodecount, len(revnode), localpushcount, source['name']))

    # Now verify that every node in the source repos has pushlog data.
    missingpl = allnodes - set(nodepushinfo.keys())
    if missingpl:
        raise error.Abort(
            'missing pushlog info for %d nodes: %s\n' %
            (len(missingpl), ', '.join(sorted(hex(n) for n in missingpl))))

    # Filter out changesets we aren't aggregating.
    # We also use this pass to identify which nodes to bookmark.
    books = {}
    sourcenodes = set()
    for source in conf.sources:
        sourcerepo = hg.repository(ui, path=source['path'])
        cl = sourcerepo.changelog
        index = cl.index

        sourcerevs = sourcerepo.revs(source['pullrevs'])
        sourcerevs.sort()
        headrevs = set(cl.headrevs())
        sourceheadrevs = headrevs & set(sourcerevs)

        # We /could/ allow multiple heads from each source repo. But for now
        # it is easier to limit to 1 head per source.
        if len(sourceheadrevs) > 1:
            raise error.Abort(
                '%s has %d heads' % (source['name'], len(sourceheadrevs)),
                hint='define pullrevs to limit what is aggregated')

        for rev in cl:
            if rev not in sourcerevs:
                continue

            node = index[rev][7]
            sourcenodes.add(node)
            if source['bookmark']:
                books[source['bookmark']] = node

        ui.write(
            'aggregating %d/%d revisions for %d heads from %s\n' %
            (len(sourcerevs), len(cl), len(sourceheadrevs), source['name']))

    nodepushinfo = {
        k: v
        for k, v in nodepushinfo.iteritems() if k in sourcenodes
    }

    ui.write('aggregating %d/%d nodes from %d original pushes\n' %
             (len(nodepushinfo), len(allnodes), pushcount))

    # We now have accounting for every changeset. Because pulling changesets
    # is a bit time consuming, it is worthwhile to minimize the number of pull
    # operations. We do this by ordering all changesets by original push time
    # then emitting the minimum number of "fast forward" nodes from the tip
    # of each linear range inside that list.

    # (time, source, rev, user, pushid) -> node
    inversenodeinfo = {v: k for k, v in nodepushinfo.iteritems()}

    destui = ui.copy()
    destui.setconfig('format', 'aggressivemergedeltas', True)
    destui.setconfig('format', 'maxchainlen', 10000)

    destrepo = hg.repository(destui,
                             path=conf.destpath,
                             create=not os.path.exists(conf.destpath))
    destcl = destrepo.changelog
    pullpushinfo = {
        k: v
        for k, v in inversenodeinfo.iteritems() if not destcl.hasnode(v)
    }

    ui.write('%d/%d nodes will be pulled\n' %
             (len(pullpushinfo), len(inversenodeinfo)))

    # Enable aggressive merge deltas on the stage repo to minimize manifest delta
    # size. This could make delta chains very long. So we may want to institute a
    # delta chain cap on the destination repo. But this will ensure the stage repo
    # has the most efficient/compact representation of deltas. Pulling from this
    # repo will also inherit the optimal delta, so we don't need to enable
    # aggressivemergedeltas on the destination repo.
    stageui.setconfig('format', 'aggressivemergedeltas', True)

    stagerepo = hg.repository(stageui,
                              path=conf.stagepath,
                              create=not os.path.exists(conf.stagepath))

    for source in conf.sources:
        path = source['path']
        sourcepeer = hg.peer(ui, {}, path)
        ui.write('pulling %s into %s\n' % (path, conf.stagepath))
        exchange.pull(stagerepo, sourcepeer)

    pullnodes = list(emitfastforwardnodes(stagerepo, pullpushinfo))
    unifiedpushes = list(unifypushes(inversenodeinfo))

    ui.write('consolidated into %d pulls from %d unique pushes\n' %
             (len(pullnodes), len(unifiedpushes)))

    if not pullnodes:
        ui.write('nothing to do; exiting\n')
        return

    stagepeer = hg.peer(ui, {}, conf.stagepath)

    for node in pullnodes:
        # TODO Bug 1265002 - we should update bookmarks when we pull.
        # Otherwise the changesets will get replicated without a bookmark
        # and any poor soul who pulls will see a nameless head.
        exchange.pull(destrepo, stagepeer, heads=[node])
        # For some reason there is a massive memory leak (10+ MB per
        # iteration on Firefox repos) if we don't gc here.
        gc.collect()

    # Now that we've aggregated all the changesets in the destination repo,
    # define the pushlog entries.
    pushlog = getattr(destrepo, 'pushlog', None)
    if not pushlog:
        raise error.Abort('pushlog API not available',
                          hint='is the pushlog extension loaded?')

    with destrepo.lock():
        with destrepo.transaction('pushlog') as tr:
            insertpushes = list(newpushes(destrepo, unifiedpushes))
            ui.write('inserting %d pushlog entries\n' % len(insertpushes))
            pushlog.recordpushes(insertpushes, tr=tr)

    # Verify that pushlog time in revision order is always increasing.
    destnodepushtime = {}
    for push in destrepo.pushlog.pushes():
        for node in push.nodes:
            destnodepushtime[bin(node)] = push.when

    destcl = destrepo.changelog
    lastpushtime = 0
    for rev in destrepo:
        node = destcl.node(rev)
        pushtime = destnodepushtime[node]

        if pushtime < lastpushtime:
            ui.warn('push time for %d is older than %d\n' % (rev, rev - 1))

        lastpushtime = pushtime

    # Write bookmarks.
    ui.write('writing %d bookmarks\n' % len(books))

    with destrepo.wlock():
        with destrepo.lock():
            with destrepo.transaction('bookmarks') as tr:
                bm = bookmarks.bmstore(destrepo)
                books.update({
                    book: None  # delete any bookmarks not found in the update
                    for book in bm.keys() if book not in books
                })
                # Mass replacing may not be the proper strategy. But it works for
                # our current use case.
                bm.applychanges(destrepo, tr, books.items())

    if not opts.get('skipreplicate'):
        # This is a bit hacky. Pushlog and bookmarks aren't currently replicated
        # via the normal hooks mechanism because we use the low-level APIs to
        # write them. So, we send a replication message to sync the entire repo.
        try:
            vcsr = extensions.find('vcsreplicator')
        except KeyError:
            raise error.Abort(
                'vcsreplicator extension not installed; '
                'pushlog and bookmarks may not be replicated properly')

        vcsr.replicatecommand(destrepo.ui, destrepo)
Example #8
def unifyrepo(ui, settings):
    """Unify the contents of multiple source repositories using settings.

    The settings file is a Mercurial config file (basically an INI file).
    """
    conf = unifyconfig(settings)

    # Ensure destrepo is created with generaldelta enabled.
    ui.setconfig('format', 'usegeneraldelta', True)
    ui.setconfig('format', 'generaldelta', True)

    # Verify all source repos have the same revision 0
    rev0s = set()
    for source in conf.sources:
        repo = hg.repository(ui, path=source['path'])

        # Verify
        node = repo[0].node()
        if rev0s and node not in rev0s:
            ui.warn('repository has different rev 0: %s\n' % source['name'])

        rev0s.add(node)

    # Ensure the staging repo has all changesets from the source repos.

    stageui = ui.copy()

    # Enable aggressive merge deltas on the stage repo to minimize manifest delta
    # size. This could make delta chains very long. So we may want to institute a
    # delta chain cap on the destination repo. But this will ensure the stage repo
    # has the most efficient/compact representation of deltas. Pulling from this
    # repo will also inherit the optimal delta, so we don't need to enable
    # aggressivemergedeltas on the destination repo.
    stageui.setconfig('format', 'aggressivemergedeltas', True)

    stagerepo = hg.repository(stageui, path=conf.stagepath,
                              create=not os.path.exists(conf.stagepath))

    for source in conf.sources:
        path = source['path']
        sourcepeer = hg.peer(ui, {}, path)
        ui.write('pulling %s into %s\n' % (path, conf.stagepath))
        exchange.pull(stagerepo, sourcepeer)

    # Now collect all the changeset data with pushlog info.
    # node -> (when, source, rev, who, pushid)
    nodepushinfo = {}
    pushcount = 0
    allnodes = set()

    # Obtain pushlog data from each source repo. We obtain data for every node
    # and filter later because we want to be sure we have the earliest known
    # push data for a given node.
    for source in conf.sources:
        sourcerepo = hg.repository(ui, path=source['path'])
        pushlog = getattr(sourcerepo, 'pushlog', None)
        if not pushlog:
            raise error.Abort('pushlog API not available',
                              hint='is the pushlog extension loaded?')

        index = sourcerepo.changelog.index
        revnode = {}
        for rev in sourcerepo:
            # revlog.node() is too slow. Use the index directly.
            node = index[rev][7]
            revnode[rev] = node
            allnodes.add(node)

        noderev = {v: k for k, v in revnode.iteritems()}

        localpushcount = 0
        pushnodecount = 0
        for pushid, who, when, nodes in pushlog.pushes():
            pushcount += 1
            localpushcount += 1
            for node in nodes:
                pushnodecount += 1
                bnode = bin(node)

                # There is a race between us iterating the repo and querying the
                # pushlog. A new changeset could be written between when we
                # obtain nodes and encounter the pushlog. So ignore pushlog
                # for nodes we don't know about.
                if bnode not in noderev:
                    ui.warn('pushlog entry for unknown node: %s; '
                            'possible race condition?\n' % node)
                    continue

                rev = noderev[bnode]

                if bnode not in nodepushinfo:
                    nodepushinfo[bnode] = (when, path, rev, who, pushid)
                else:
                    currentwhen = nodepushinfo[bnode][0]
                    if when < currentwhen:
                        nodepushinfo[bnode] = (when, path, rev, who, pushid)

        ui.write('obtained pushlog info for %d/%d revisions from %d pushes from %s\n' % (
                 pushnodecount, len(revnode), localpushcount, source['name']))

    # Now verify that every node in the source repos has pushlog data.
    missingpl = allnodes - set(nodepushinfo.keys())
    if missingpl:
        raise error.Abort('missing pushlog info for %d nodes\n' % len(missingpl))

    # Filter out changesets we aren't aggregating.
    # We also use this pass to identify which nodes to bookmark.
    books = {}
    sourcenodes = set()
    for source in conf.sources:
        sourcerepo = hg.repository(ui, path=source['path'])
        cl = sourcerepo.changelog
        index = cl.index

        sourcerevs = sourcerepo.revs(source['pullrevs'])
        sourcerevs.sort()
        headrevs = set(cl.headrevs())
        sourceheadrevs = headrevs & set(sourcerevs)

        # We /could/ allow multiple heads from each source repo. But for now
        # it is easier to limit to 1 head per source.
        if len(sourceheadrevs) > 1:
            raise error.Abort('%s has %d heads' % (source['name'], len(sourceheadrevs)),
                              hint='define pullrevs to limit what is aggregated')

        for rev in cl:
            if rev not in sourcerevs:
                continue

            node = index[rev][7]
            sourcenodes.add(node)
            if source['bookmark']:
                books[source['bookmark']] = node

        ui.write('aggregating %d/%d revisions for %d heads from %s\n' % (
                 len(sourcerevs), len(cl), len(sourceheadrevs), source['name']))

    nodepushinfo = {k: v for k, v in nodepushinfo.iteritems() if k in sourcenodes}

    ui.write('aggregating %d/%d nodes from %d original pushes\n' % (
             len(nodepushinfo), len(allnodes), pushcount))

    # We now have accounting for every changeset. Because pulling changesets
    # is a bit time consuming, it is worthwhile to minimize the number of pull
    # operations. We do this by ordering all changesets by original push time
    # then emitting the minimum number of "fast forward" nodes from the tip
    # of each linear range inside that list.

    # (time, source, rev, user, pushid) -> node
    inversenodeinfo = {v: k for k, v in nodepushinfo.iteritems()}

    destui = ui.copy()
    destui.setconfig('format', 'aggressivemergedeltas', True)
    destui.setconfig('format', 'maxchainlen', 10000)

    destrepo = hg.repository(destui, path=conf.destpath,
                             create=not os.path.exists(conf.destpath))
    destcl = destrepo.changelog
    pullpushinfo = {k: v for k, v in inversenodeinfo.iteritems() if not destcl.hasnode(v)}

    ui.write('%d/%d nodes will be pulled\n' % (len(pullpushinfo), len(inversenodeinfo)))

    pullnodes = list(emitfastforwardnodes(stagerepo, pullpushinfo))
    unifiedpushes = list(unifypushes(inversenodeinfo))

    ui.write('consolidated into %d pulls from %d unique pushes\n' % (
             len(pullnodes), len(unifiedpushes)))

    if not pullnodes:
        ui.write('nothing to do; exiting\n')
        return

    stagepeer = hg.peer(ui, {}, conf.stagepath)

    for node in pullnodes:
        # TODO Bug 1265002 - we should update bookmarks when we pull.
        # Otherwise the changesets will get replicated without a bookmark
        # and any poor soul who pulls will see a nameless head.
        exchange.pull(destrepo, stagepeer, heads=[node])
        # For some reason there is a massive memory leak (10+ MB per
        # iteration on Firefox repos) if we don't gc here.
        gc.collect()

    # Now that we've aggregated all the changesets in the destination repo,
    # define the pushlog entries.
    pushlog = getattr(destrepo, 'pushlog', None)
    if not pushlog:
        raise error.Abort('pushlog API not available',
                          hint='is the pushlog extension loaded?')

    insertpushes = list(newpushes(destrepo, unifiedpushes))
    ui.write('inserting %d pushlog entries\n' % len(insertpushes))
    pushlog.recordpushes(insertpushes)

    # Verify that pushlog time in revision order is always increasing.
    destnodepushtime = {}
    for pushid, who, when, nodes in destrepo.pushlog.pushes():
        for node in nodes:
            destnodepushtime[bin(node)] = when

    destcl = destrepo.changelog
    lastpushtime = 0
    for rev in destrepo:
        node = destcl.node(rev)
        pushtime = destnodepushtime[node]

        if pushtime < lastpushtime:
            ui.warn('push time for %d is older than %d\n' % (rev, rev - 1))

        lastpushtime = pushtime

    # Write bookmarks.
    ui.write('writing %d bookmarks\n' % len(books))

    with destrepo.lock():
        with destrepo.transaction('bookmarks') as tr:
            bm = bookmarks.bmstore(destrepo)
            # Mass replacing may not be the proper strategy. But it works for
            # our current use case.
            bm.clear()
            bm.update(books)
            bm.recordchange(tr)

    # This is a bit hacky. Pushlog and bookmarks aren't currently replicated
    # via the normal hooks mechanism because we use the low-level APIs to
    # write them. So, we send a replication message to sync the entire repo.
    try:
        vcsr = extensions.find('vcsreplicator')
    except KeyError:
        raise error.Abort('vcsreplicator extension not installed; '
                          'pushlog and bookmarks may not be replicated properly')

    vcsr.replicatecommand(destrepo.ui, destrepo)
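Per the docstring, the settings file consumed by unifyconfig is a Mercurial config (INI) file, but neither excerpt shows its layout. A hypothetical sketch covering only the fields the code reads (conf.stagepath, conf.destpath, and per-source path/name/bookmark/pullrevs; all section and key names below are illustrative, not taken from the source):

; Hypothetical layout -- section and key names are illustrative only.
[unify]
stagepath = /repos/stage
destpath = /repos/unified

[source:central]
path = /repos/central
bookmark = central
pullrevs = 0:tip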
Example #9
def smartlogrevset(repo, subset, x):
    """``smartlog([master], [recentdays=N])``
    Changesets relevant to you.

    'master' is the head of the public branch.
    Unnamed heads will be hidden unless they are within 'recentdays'.
    """

    args = revset.getargsdict(x, 'smartlogrevset', 'master recentdays')
    if 'master' in args:
        masterstring = revsetlang.getstring(args['master'],
                                            _('master must be a string'))
    else:
        masterstring = ''

    recentdays = revsetlang.getinteger(args.get('recentdays'),
                                       _("recentdays should be int"), -1)

    revs = set()
    heads = set()

    rev = repo.changelog.rev
    ancestor = repo.changelog.ancestor
    node = repo.changelog.node
    parentrevs = repo.changelog.parentrevs

    books = bookmarks.bmstore(repo)
    ignore = re.compile(repo.ui.config('smartlog',
                                       'ignorebookmarks',
                                       '!'))
    for b in books:
        if not ignore.match(b):
            heads.add(rev(books[b]))

    # add 'interesting' remote bookmarks as well
    remotebooks = set()
    if util.safehasattr(repo, 'names') and 'remotebookmarks' in repo.names:
        ns = repo.names['remotebookmarks']
        remotebooks = set(ns.listnames(repo))
        for name in _reposnames(repo.ui):
            if name in remotebooks:
                heads.add(rev(ns.namemap(repo, name)[0]))

    heads.update(repo.revs('.'))

    global hiddenchanges
    headquery = 'head()'
    if remotebooks:
        # When we have remote bookmarks, only show draft heads, since public
        # heads should have a remote bookmark indicating them. This allows us
        # to force push server bookmarks to new locations, and not have the
        # commits clutter the user's smartlog.
        headquery = 'heads(draft())'

    allheads = set(repo.revs(headquery))
    if recentdays >= 0:
        recentquery = revsetlang.formatspec('%r & date(-%d)', headquery,
                                            recentdays)
        recentrevs = set(repo.revs(recentquery))
        hiddenchanges += len(allheads - heads) - len(recentrevs - heads)
        heads.update(recentrevs)
    else:
        heads.update(allheads)

    masterrevset = _masterrevset(repo.ui, repo, masterstring)
    masterrev = _masterrev(repo, masterrevset)

    if masterrev is None:
        masterrev = repo['tip'].rev()

    masternode = node(masterrev)

    # Find all draft ancestors and latest public ancestor of heads
    # that are not in master.
    # We don't want to draw all public commits because there can be too
    # many of them.
    # Don't use revsets, they are too slow
    for head in heads:
        anc = rev(ancestor(node(head), masternode))
        queue = [head]
        while queue:
            current = queue.pop(0)
            if current not in revs:
                revs.add(current)
                # stop as soon as we find public commit
                ispublic = repo[current].phase() == phases.public
                if current != anc and not ispublic:
                    parents = parentrevs(current)
                    for p in parents:
                        if p > anc:
                            queue.append(p)

    # add context: master, current commit, and the common ancestor
    revs.add(masterrev)

    return subset & revs
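A registration sketch for the predicate: modern Mercurial extensions declare revsets through registrar (whether this extension uses exactly this form is an assumption):

from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('smartlog([master], [recentdays=N])')
def smartlogrevset(repo, subset, x):
    ...  # body as defined in the example above

Once registered, the predicate works anywhere a revset does, e.g. hg log --graph -r "smartlog('master', recentdays=14)".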
Example #10
def smartlogrevset(repo, subset, x):
    """``smartlog([master], [recentdays=N])``
    Changesets relevant to you.

    'master' is the head of the public branch.
    Unnamed heads will be hidden unless they are within 'recentdays'.
    """

    args = revset.getargsdict(x, 'smartlogrevset', 'master recentdays')
    if 'master' in args:
        masterstring = revsetlang.getstring(args['master'],
                                            _('master must be a string'))
    else:
        masterstring = ''

    recentdays = revsetlang.getinteger(args.get('recentdays'),
                                       _("recentdays should be int"), -1)

    revs = set()
    heads = set()

    rev = repo.changelog.rev
    branchinfo = repo.changelog.branchinfo
    ancestor = repo.changelog.ancestor
    node = repo.changelog.node
    parentrevs = repo.changelog.parentrevs

    books = bookmarks.bmstore(repo)
    ignore = re.compile(repo.ui.config('smartlog',
                                       'ignorebookmarks',
                                       '!'))
    for b in books:
        if not ignore.match(b):
            heads.add(rev(books[b]))

    # add 'interesting' remote bookmarks as well
    remotebooks = set()
    if util.safehasattr(repo, 'names') and 'remotebookmarks' in repo.names:
        ns = repo.names['remotebookmarks']
        remotebooks = set(ns.listnames(repo))
        for name in _reposnames(repo.ui):
            if name in remotebooks:
                heads.add(rev(ns.namemap(repo, name)[0]))

    heads.update(repo.revs('.'))

    global hiddenchanges
    headquery = 'head() & branch(.)'
    if remotebooks:
        # When we have remote bookmarks, only show draft heads, since public
        # heads should have a remote bookmark indicating them. This allows us
        # to force push server bookmarks to new locations, and not have the
        # commits clutter the user's smartlog.
        headquery = 'draft() &' + headquery

    allheads = set(repo.revs(headquery))
    if recentdays >= 0:
        recentquery = revsetlang.formatspec('%r & date(-%d)', headquery,
                                            recentdays)
        recentrevs = set(repo.revs(recentquery))
        hiddenchanges += len(allheads - heads) - len(recentrevs - heads)
        heads.update(recentrevs)
    else:
        heads.update(allheads)

    branches = set()
    for head in heads:
        branches.add(branchinfo(head)[0])

    masterrevset = _masterrevset(repo.ui, repo, masterstring)
    masterrev = _masterrev(repo, masterrevset)

    if masterrev is None:
        masterbranch = None
    else:
        masterbranch = branchinfo(masterrev)[0]

    for branch in branches:
        if branch != masterbranch:
            try:
                rs = 'first(reverse(branch("%s")) & public())' % branch
                branchmaster = repo.revs(rs).first()
                if branchmaster is None:
                    # local-only (draft) branch
                    rs = 'branch("%s")' % branch
                    branchmaster = repo.revs(rs).first()
            except Exception:
                branchmaster = repo.revs('tip').first()
        else:
            branchmaster = masterrev

        # Find all draft ancestors and latest public ancestor of heads
        # that are not in master.
        # We don't want to draw all public commits because there can be too
        # many of them.
        # Don't use revsets, they are too slow
        for head in heads:
            if branchinfo(head)[0] != branch:
                continue
            anc = rev(ancestor(node(head), node(branchmaster)))
            queue = [head]
            while queue:
                current = queue.pop(0)
                if current not in revs:
                    revs.add(current)
                    # stop as soon as we find public commit
                    ispublic = repo[current].phase() == phases.public
                    if current != anc and not ispublic:
                        parents = parentrevs(current)
                        for p in parents:
                            if p > anc:
                                queue.append(p)

        # add context: master, current commit, and the common ancestor
        revs.add(branchmaster)

        # get common branch ancestor
        if branch != masterbranch:
            anc = None
            for r in revs:
                if branchinfo(r)[0] != branch:
                    continue
                if anc is None:
                    anc = r
                else:
                    anc = rev(ancestor(node(anc), node(r)))
            if anc:
                revs.add(anc)

    return subset & revs