Code example #1 (score: 0)
File: journal.py — project: ahornby/eden
def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.
    """
    order = kwargs.pop(r"order", max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        value, key, it = order(pycompat.itervalues(iterable_map))
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]
Code example #2 (score: 0)
def buildprobtable(fp, cmd="hg manifest tip"):
    """Construct and print a table of probabilities for path name
    components.  The numbers are percentages.

    ``cmd`` is run through a shell and its stdout is read line by line,
    each line being treated as a tracked file path. The generated table
    source code is written to the writable text file object ``fp``.
    """
    counts = collections.defaultdict(int)
    for line in os.popen(cmd).read().splitlines():
        # Strip revlog suffixes so "foo.i"/"foo.d" count as "foo".
        if line[-2:] in (".i", ".d"):
            line = line[:-2]
        if line.startswith("data/"):
            line = line[5:]
        for c in line:
            counts[c] += 1
    # Separators and line endings are structure, not name content.
    for c in "\r/\n":
        counts.pop(c, None)
    t = sum(counts.values()) / 100.0
    fp.write("probtable = (")
    # dict.iteritems() existed only on Python 2; items() is the py3 form.
    for i, (k, v) in enumerate(
            sorted(counts.items(), key=lambda x: x[1], reverse=True)):
        if (i % 5) == 0:
            fp.write("\n    ")
        vt = v / t
        # Characters contributing less than 0.05% are noise; stop here.
        if vt < 0.0005:
            break
        fp.write("(%r, %.03f), " % (k, vt))
    fp.write("\n    )\n")
Code example #3 (score: 0)
File: linkrevcache.py — project: miscreant1/eden
 def close(self):
     """Close every database handle held in self._dbs and forget them.

     The attribute check is necessary if __init__ fails - the caller may
     call "close" in a "finally" block and it probably does not want
     close() to raise an exception there.
     """
     # util.safehasattr was a Python 2 workaround (py2 hasattr swallowed
     # every exception); on Python 3 the builtin only catches
     # AttributeError, which is exactly the behavior wanted here.
     if hasattr(self, "_dbs"):
         for db in self._dbs.values():
             db.close()
         self._dbs.clear()
Code example #4 (score: 0)
def verifyremotefilelog(ui, path, **opts):
    """Walk every file blob under ``path`` and report entries with a
    null linknode.

    Files named "repos" are skipped. For each remaining blob the
    history mapping is inspected, and for entries whose linknode is
    null the cache key and blob path (relative to ``path``) are printed
    via ``ui.status``.
    """
    decompress = opts.get("decompress")

    for root, dirs, files in os.walk(path):
        for file in files:
            # "repos" is bookkeeping, not a file blob.
            if file == "repos":
                continue
            filepath = os.path.join(root, file)
            size, firstnode, mapping = parsefileblob(filepath, decompress)
            # mapping values unpack as (p1, p2, linknode, copyfrom) —
            # assumed from the loop below; confirm against parsefileblob.
            for p1, p2, linknode, copyfrom in mapping.values():
                if linknode == nullid:
                    actualpath = os.path.relpath(root, path)
                    key = fileserverclient.getcachekey("reponame", actualpath, file)
                    ui.status("%s %s\n" % (key, os.path.relpath(filepath, path)))
Code example #5 (score: 0)
File: synthrepo.py — project: zerkella/eden
def analyze(ui, repo, *revs, **opts):
    """create a simple model of a repository to use for later synthesis

    This command examines every changeset in the given range (or all
    of history if none are specified) and creates a simple statistical
    model of the history of the repository. It also measures the directory
    structure of the repository as checked out.

    The model is written out to a JSON file, and can be used by
    :hg:`synthesize` to create or augment a repository with synthetic
    commits that have a structure that is statistically similar to the
    analyzed repository.

    With ``--output -`` the JSON model is written to stdout instead of
    a file.
    """
    root = repo.root
    if not root.endswith(os.path.sep):
        root += os.path.sep

    revs = list(revs)
    revs.extend(opts["rev"])
    if not revs:
        revs = [":"]

    output = opts["output"]
    if not output:
        output = os.path.basename(root) + ".json"

    if output == "-":
        fp = sys.stdout
    else:
        fp = open(output, "w")

    # Always obtain file counts of each directory in the given root directory.
    def onerror(e):
        ui.warn(_("error walking directory structure: %s\n") % e)

    dirs = {}
    rootprefixlen = len(root)
    for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
        dirpathfromroot = dirpath[rootprefixlen:]
        dirs[dirpathfromroot] = len(filenames)
        # Do not descend into the repository's own metadata directory.
        if ".hg" in dirnames:
            dirnames.remove(".hg")

    # Histograms of the modeled quantities; zerodict presumably acts like
    # collections.defaultdict(int) — TODO confirm against its definition.
    lineschanged = zerodict()
    children = zerodict()
    p1distance = zerodict()
    p2distance = zerodict()
    linesinfilesadded = zerodict()
    fileschanged = zerodict()
    filesadded = zerodict()
    filesremoved = zerodict()
    linelengths = zerodict()
    interarrival = zerodict()
    parents = zerodict()
    dirsadded = zerodict()
    tzoffset = zerodict()

    # If a mercurial repo is available, also model the commit history.
    if repo:
        revs = scmutil.revrange(repo, revs)
        revs.sort()

        progress = ui.progress
        _analyzing = _("analyzing")
        _changesets = _("changesets")
        _total = len(revs)

        for i, rev in enumerate(revs):
            progress(_analyzing, i, unit=_changesets, total=_total)
            ctx = repo[rev]
            pl = ctx.parents()
            pctx = pl[0]
            prev = pctx.rev()
            children[prev] += 1
            p1distance[rev - prev] += 1
            parents[len(pl)] += 1
            tzoffset[ctx.date()[1]] += 1
            if len(pl) > 1:
                p2distance[rev - pl[1].rev()] += 1
            # Reuse the parent context when it is the immediately preceding
            # revision; otherwise look the previous revision up.
            if prev == rev - 1:
                lastctx = pctx
            else:
                lastctx = repo[rev - 1]
            if lastctx.rev() != nullrev:
                timedelta = ctx.date()[0] - lastctx.date()[0]
                interarrival[roundto(timedelta, 300)] += 1
            diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
            fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
            for filename, mar, lineadd, lineremove, isbin in parsegitdiff(
                    diff):
                if isbin:
                    continue
                added = sum(pycompat.itervalues(lineadd), 0)
                if mar == "m":
                    if added and lineremove:
                        lineschanged[roundto(added, 5),
                                     roundto(lineremove, 5)] += 1
                        filechanges += 1
                elif mar == "a":
                    fileadds += 1
                    if "/" in filename:
                        filedir = filename.rsplit("/", 1)[0]
                        if filedir not in pctx.dirs():
                            diradds += 1
                    linesinfilesadded[roundto(added, 5)] += 1
                elif mar == "r":
                    fileremoves += 1
                for length, count in iteritems(lineadd):
                    linelengths[length] += count
            fileschanged[filechanges] += 1
            filesadded[fileadds] += 1
            dirsadded[diradds] += 1
            filesremoved[fileremoves] += 1

    invchildren = zerodict()

    for rev, count in iteritems(children):
        invchildren[count] += 1

    if output != "-":
        ui.status(_("writing output to %s\n") % output)

    # Sort a histogram dict into (key, count) pairs, most frequent first.
    def pronk(d):
        return sorted(iteritems(d), key=lambda x: x[1], reverse=True)

    json.dump(
        {
            "revs": len(revs),
            "initdirs": pronk(dirs),
            "lineschanged": pronk(lineschanged),
            "children": pronk(invchildren),
            "fileschanged": pronk(fileschanged),
            "filesadded": pronk(filesadded),
            "linesinfilesadded": pronk(linesinfilesadded),
            "dirsadded": pronk(dirsadded),
            "filesremoved": pronk(filesremoved),
            "linelengths": pronk(linelengths),
            "parents": pronk(parents),
            "p1distance": pronk(p1distance),
            "p2distance": pronk(p2distance),
            "interarrival": pronk(interarrival),
            "tzoffset": pronk(tzoffset),
        },
        fp,
    )
    # Only close a handle we opened ourselves: when output == "-", fp is
    # sys.stdout and closing it would break any later output from the
    # process (the original code closed it unconditionally).
    if fp is not sys.stdout:
        fp.close()
Code example #6 (score: 0)
def recordfilter(ui, headers, operation=None):
    """Chunk-selection filter that lets the user pick hunks by editing
    the patch text in their editor.

    Only active when the "chunkselector" interface is configured to
    "editor"; otherwise delegates to the original record filter.
    Returns a (chunks, special-opts) pair like the filter it replaces.
    """
    if ui.interface("chunkselector") != "editor":
        return originalrecordfilter(ui, headers, operation)

    # construct diff string from headers
    if len(headers) == 0:
        return [], {}

    patch = stringio()
    patch.write(crecordmod.diffhelptext)

    # Headers flagged "special" (e.g. binary changes) are remembered so the
    # originals can be re-substituted after the round-trip through the editor.
    specials = {}

    for header in headers:
        patch.write("#\n")
        if header.special():
            # this is useful for special changes, we are able to get away with
            # only including the parts of headers that offer useful info
            specials[header.filename()] = header
            for h in header.header:
                if h.startswith("index "):
                    # starting at 'index', the headers for binary files tend to
                    # stop offering useful info for the viewer
                    patch.write(
                        _("""\
# this modifies a binary file (all or nothing)
"""))
                    break
                if not h.startswith("diff "):
                    # For specials, we only care about the filename header.
                    # The rest can be displayed as comments
                    patch.write("# ")
                patch.write(h)
        else:
            header.write(patch)
            for hunk in header.hunks:
                hunk.write(patch)

    # Honor a dedicated editor for chunk selection when one is configured.
    patcheditor = ui.config("ui", "editor.chunkselector")
    if patcheditor is not None:
        override = {("ui", "editor"): patcheditor}
    else:
        override = {}

    with ui.configoverride(override):
        patch = ui.edit(patch.getvalue(), "", action=(operation or "edit"))

    # remove comments from patch
    # if there's an empty line, add a space to it
    patch = [(line if len(line) > 0 else " ") + "\n"
             for line in patch.splitlines() if not line.startswith("#")]

    headers = patchmod.parsepatch(patch)

    applied = {}
    for h in headers:
        if h.filename() in specials:
            h = specials[h.filename()]
        applied[h.filename()] = [h] + h.hunks

    # Keep a file's chunks only when the header is special or at least one
    # hunk survived the edit. (applied.values() replaces the Python 2
    # pycompat.itervalues shim.)
    return (
        sum(
            [
                i for i in applied.values()
                if i[0].special() or len(i) > 1
            ],
            [],
        ),
        {},
    )