Example #1
def _decorate(fctx):
    # pair every line of the file context with its (fctx, linenum) origin;
    # a final line without a trailing newline still counts as a line
    text = fctx.data()
    linecount = text.count("\n")
    if text and not text.endswith("\n"):
        linecount += 1
    return ([(fctx, i) for i in range(linecount)], text)
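
A minimal usage sketch (not from the original source): _decorate pairs each
line of a file context with its origin, and the unterminated last line still
counts. A hypothetical stub stands in for a real Mercurial filectx here:

    class fakefctx(object):
        """hypothetical stand-in for a real filectx (illustration only)"""

        def __init__(self, text):
            self._text = text

        def data(self):
            return self._text

    fctx = fakefctx("a\nb\nc")
    decorated, text = _decorate(fctx)
    assert decorated == [(fctx, 0), (fctx, 1), (fctx, 2)]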
Example #2
    def _resolvelines(self, annotateresult, revmap, linelog):
        """(annotateresult) -> [line]. designed for annotatealllines.
        this is probably the most inefficient code in the whole fastannotate
        directory. but we have made a decision that the linelog does not
        store line contents. so getting them requires random accesses to
        the revlog data, since they can be many, it can be very slow.
        """
        # [llrev]
        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
        result = [None] * len(annotateresult)
        # {(rev, linenum): [lineindex]}
        key2idxs = collections.defaultdict(list)
        for i in range(len(result)):
            key2idxs[(revs[i], annotateresult[i][1])].append(i)
        while key2idxs:
            # find an unresolved line and its linelog rev to annotate
            hsh = None
            try:
                for (rev, _linenum), idxs in pycompat.iteritems(key2idxs):
                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
                        continue
                    hsh = annotateresult[idxs[0]][0]
                    break
            except StopIteration:  # no more unresolved lines
                return result
            if hsh is None:
                # the remaining key2idxs are not in the main branch; resolve
                # them the hard way...
                revlines = {}
                for (rev, linenum), idxs in pycompat.iteritems(key2idxs):
                    if rev not in revlines:
                        hsh = annotateresult[idxs[0]][0]
                        if self.ui.debugflag:
                            self.ui.debug(
                                "fastannotate: reading %s line #%d "
                                "to resolve lines %r\n"
                                % (node.short(hsh), linenum, idxs)
                            )
                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
                        lines = mdiff.splitnewlines(fctx.data())
                        revlines[rev] = lines
                    for idx in idxs:
                        result[idx] = revlines[rev][linenum]
                assert all(x is not None for x in result)
                return result

            # run the annotation; the lines should match the file content
            self.ui.debug(
                "fastannotate: annotate %s to resolve lines\n" % node.short(hsh)
            )
            linelog.annotate(rev)
            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
            annotated = linelog.annotateresult
            lines = mdiff.splitnewlines(fctx.data())
            if len(lines) != len(annotated):
                raise faerror.CorruptedFileError("unexpected annotated lines")
            # resolve lines from the annotate result
            for i, line in enumerate(lines):
                k = annotated[i]
                if k in key2idxs:
                    for idx in key2idxs[k]:
                        result[idx] = line
                    del key2idxs[k]
        return result
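
The key2idxs grouping above is the core of the method: every unresolved
output slot is bucketed by (rev, linenum), so each revision's file data only
has to be read once. A standalone sketch of that pattern with made-up
annotate data:

    import collections

    # hypothetical annotate result: one (rev, linenum) pair per output line
    annotated = [(3, 0), (3, 1), (2, 0), (3, 0)]

    key2idxs = collections.defaultdict(list)
    for i, key in enumerate(annotated):
        key2idxs[key].append(i)

    # line 0 of rev 3 is wanted by output slots 0 and 3, so reading it once
    # resolves both
    assert key2idxs[(3, 0)] == [0, 3]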
Example #3
def renderstring(win, state, output):
    # draw as many lines of 'output' as fit, leaving the last row free
    maxy, maxx = win.getmaxyx()
    length = min(maxy - 1, len(output))
    for y in range(0, length):
        win.addstr(y, 0, output[y])
    win.noutrefresh()  # queue the update; the caller flushes with doupdate()
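
A hedged driver for renderstring, assuming it is called from an ordinary
curses program (the state argument is unused in this excerpt):

    import curses

    def main(stdscr):
        renderstring(stdscr, None, ["line %d" % i for i in range(10)])
        curses.doupdate()  # flush the update queued by noutrefresh()
        stdscr.getkey()

    curses.wrapper(main)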
Example #4
def getdag(ui, repo, revs, master):
    knownrevs = set(revs)
    gpcache = {}
    results = []
    reserved = []

    # we store parents together with the parent type information
    # but sometimes we need just a list of parents
    # [(a,b), (c,d), (e,f)] => [b, d, f]
    def unzip(parents):
        if parents:
            return list(list(zip(*parents))[1])
        else:
            return list()

    simplifygrandparents = ui.configbool("log", "simplify-grandparents")
    cl = repo.changelog
    if cl.algorithmbackend != "segments":
        simplifygrandparents = False
    if simplifygrandparents:
        rootnodes = cl.tonodes(revs)

    # For each rev we need to show, compute its parents in the dag.
    # If we have to reach for a grandparent, insert a fake node so we
    # can show '...' in the graph.
    # Use 'reversed' to start at the lowest commit so fake nodes are
    # placed at their lowest possible positions.
    for rev in reversed(revs):
        ctx = repo[rev]
        # Parents in the dag
        parents = sorted(
            set([(graphmod.PARENT, p.rev()) for p in ctx.parents()
                 if p.rev() in knownrevs]))
        # Parents not in the dag
        mpars = [
            p.rev() for p in ctx.parents()
            if p.rev() != nodemod.nullrev and p.rev() not in unzip(parents)
        ]

        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                if simplifygrandparents:
                    gp = gpcache[mpar] = cl.torevs(
                        cl.dageval(lambda: headsancestors(
                            ancestors(cl.tonodes([mpar])) & rootnodes)))
                else:
                    gp = gpcache[mpar] = dagop.reachableroots(
                        repo, smartset.baseset(revs), [mpar])
            if not gp:
                parents.append((graphmod.MISSINGPARENT, mpar))
            else:
                gp = [g for g in gp if g not in unzip(parents)]
                for g in gp:
                    parents.append((graphmod.GRANDPARENT, g))

        results.append((ctx.rev(), "C", ctx, parents))

    # Compute parent rev->parents mapping
    lookup = {}
    for r in results:
        lookup[r[0]] = unzip(r[3])

    def parentfunc(node):
        return lookup.get(node, [])

    # Compute the revs on the master line. We use this for sorting later.
    masters = set()
    queue = [master]
    while queue:
        m = queue.pop()
        if m not in masters:
            masters.add(m)
            queue.extend(lookup.get(m, []))

    # Topologically sort the noderev numbers. Note: unlike the vanilla
    # topological sorting, we move master to the top.
    order = sortnodes([r[0] for r in results], parentfunc, masters)
    order = dict((e[1], e[0]) for e in enumerate(order))

    # Sort the actual results based on their position in the 'order'
    try:
        results.sort(key=lambda x: order[x[0]], reverse=True)
    except ValueError:  # Happened when 'order' is empty
        ui.warn(_("smartlog encountered an error\n"), notice=_("note"))
        ui.warn(_("(so the sorting might be wrong.\n\n)"))
        results.reverse()

    # indent the top non-public stack
    if ui.configbool("smartlog", "indentnonpublic", False):
        rev, ch, ctx, parents = results[0]
        if ctx.phase() != phases.public:
            # find a public parent and add a fake node, so the non-public nodes
            # will be shown in the non-first column
            prev = None
            for i in range(1, len(results)):
                pctx = results[i][2]
                if pctx.phase() == phases.public:
                    prev = results[i][0]
                    break
            if prev is not None:
                reserved.append(prev)

    return results, reserved
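
The master-line computation inside getdag is a plain reachability walk over
the parent table; the same loop, self-contained, with a hypothetical lookup
mapping:

    # hypothetical rev -> parent revs table; rev 5 plays the role of master
    lookup = {5: [4], 4: [2], 3: [2], 2: [1], 1: []}

    masters = set()
    queue = [5]
    while queue:
        m = queue.pop()
        if m not in masters:
            masters.add(m)
            queue.extend(lookup.get(m, []))

    assert masters == {5, 4, 2, 1}  # rev 3 sits off the master line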
Example #5
def _buildlinkrevcache(ui, repo, db, end):
    checkancestor = ui.configbool("linkrevcache", "checkancestor", True)
    readfilelog = ui.configbool("linkrevcache", "readfilelog", True)
    # 2441406 pages: 10G by default (assuming page size = 4K).
    maxpagesize = ui.configint("linkrevcache", "maxpagesize") or 2441406

    repo = repo.unfiltered()
    cl = repo.changelog
    idx = cl.index
    ml = repo.manifestlog

    filelogcache = {}

    def _getfilelog(path):
        if path not in filelogcache:
            # Make memory usage bounded
            if len(filelogcache) % 1000 == 0:
                if _getrsspagecount() > maxpagesize:
                    filelogcache.clear()
            filelogcache[path] = filelog.filelog(repo.svfs, path)
        return filelogcache[path]

    start = db.getlastrev() + 1

    # the number of ancestor tests above which the slow (Python) stateful
    # (cached ancestors) algorithm beats the fast (C) stateless (walk through
    # the changelog index every time) algorithm.
    ancestorcountthreshold = 10

    with progress.bar(ui, _("building"), _("changesets"), end) as prog:
        for rev in range(start, end + 1):
            prog.value = rev
            clr = cl.changelogrevision(rev)
            md = ml[clr.manifest].read()

            if checkancestor:
                if len(clr.files) >= ancestorcountthreshold:
                    # we may need to test ancestors against rev frequently;
                    # in this case, pre-calculating rev's ancestors helps.
                    ancestors = cl.ancestors([rev])

                    def isancestor(x):
                        return x in ancestors

                else:
                    # the C index ancestor testing is faster than Python's
                    # lazyancestors.
                    def isancestor(x):
                        return x in idx.commonancestorsheads(x, rev)

            for path in clr.files:
                if path not in md:
                    continue

                fnode = md[path]

                if readfilelog:
                    fl = _getfilelog(path)
                    frev = fl.rev(fnode)
                    lrev = fl.linkrev(frev)
                    if lrev == rev:
                        continue
                else:
                    lrev = None

                if checkancestor:
                    linkrevs = set(db.getlinkrevs(path, fnode))
                    if lrev is not None:
                        linkrevs.add(lrev)
                    if rev in linkrevs:
                        continue
                    if any(isancestor(l) for l in linkrevs):
                        continue

                # found a new linkrev!
                if ui.debugflag:
                    ui.debug("%s@%s: new linkrev %s\n" %
                             (path, node.hex(fnode), rev))

                db.appendlinkrev(path, fnode, rev)

            db.setlastrev(rev)
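
_getrsspagecount is not shown in this example. One plausible implementation
(an assumption, and Linux-only) reads the resident-set size in pages from
/proc/self/statm:

    def _getrsspagecount():
        # /proc/self/statm fields are in pages: size resident shared text
        # lib data dt; field 1 is the resident set size
        with open("/proc/self/statm") as f:
            return int(f.read().split()[1])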
Example #6
    def serialize(cls, status):
        # 'status' carries seven file lists (modified, added, removed,
        # deleted, unknown, ignored, clean); base64-encode every path
        # before delegating to the parent serializer
        ls = [list(status[i]) for i in range(7)]
        ll = []
        for s in ls:
            # str.encode("base64") is the Python 2 codec spelling
            ll.append([f.encode("base64") for f in s])
        return super(buildstatusserializer, cls).serialize(ll)
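
str.encode("base64") only exists on Python 2; on Python 3 the same
round-trip goes through the base64 module. A small illustration (not part of
the original code):

    import base64

    path = b"dir/file.txt"
    encoded = base64.b64encode(path)  # Python 3 spelling of f.encode("base64")
    assert base64.b64decode(encoded) == path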