Example #1
    def entries(sortcolumn="", descending=False, subdir="", **map):
        rows = []
        parity = common.paritygen(stripecount)
        for repo in Repository.objects.has_view_permission(request.user):
            contact = smart_str(repo.owner.get_full_name())

            lastchange = (common.get_mtime(repo.location), util.makedate()[1])
             
            row = dict(contact=contact or "unknown",
                       contact_sort=contact.upper() or "unknown",
                       name=smart_str(repo.name),
                       name_sort=smart_str(repo.name),
                       url=repo.get_absolute_url(),
                       description=smart_str(repo.description) or "unknown",
                       description_sort=smart_str(repo.description.upper()) or "unknown",
                       lastchange=lastchange,
                       lastchange_sort=lastchange[1]-lastchange[0],
                       archives=archivelist(u, "tip", url))
            if (not sortcolumn or (sortcolumn, descending) == sortdefault):
                # fast path for unsorted output
                row['parity'] = parity.next()
                yield row
            else:
                rows.append((row["%s_sort" % sortcolumn], row))

        if rows:
            rows.sort()
            if descending:
                rows.reverse()
            for key, row in rows:
                row['parity'] = parity.next()
                yield row
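
All of the examples in this listing drive their row striping from a paritygen-style generator (parity.next() in the Python 2 snippets, next(parity) in the Python 3 one). Below is a minimal sketch of such a generator, written in Python 3 syntax and assuming only the simple alternating behaviour the snippets rely on; the real Mercurial helper also accepts an offset argument (used in Example #4), which this sketch omits.

def paritygen(stripecount):
    # Yield 0/1 parity values, flipping after every `stripecount` rows,
    # so templates can stripe consecutive groups of output rows.
    count = 0
    parity = 0
    while True:
        yield parity
        count += 1
        if stripecount and count >= stripecount:
            parity = 1 - parity
            count = 0

# With stripecount=2 the parity flips every two rows.
gen = paritygen(2)
print([next(gen) for _ in range(6)])  # [0, 0, 1, 1, 0, 0]
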
Example #2
def filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        for lineno, t in enumerate(text.splitlines(1)):
            yield {"line": t,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "parity": parity.next()}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                rev=fctx.rev(),
                node=hex_(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f))
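
The nested lines() helper pairs each line of the file with a stable id, a padded line number, and a parity value for striping. A self-contained sketch of the same pattern in Python 3 syntax, with itertools.cycle standing in for the parity generator:

import itertools

def numbered_lines(text, parity):
    # splitlines(True) keeps each trailing newline, matching
    # text.splitlines(1) in the example above.
    for lineno, line in enumerate(text.splitlines(True), start=1):
        yield {"line": line,
               "lineid": "l%d" % lineno,
               "linenumber": "% 6d" % lineno,
               "parity": next(parity)}

rows = list(numbered_lines("first\nsecond\n", itertools.cycle([0, 1])))
print(rows[0]["lineid"], rows[1]["parity"])  # l1 1
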
Example #3
    def diff(self, tmpl, node1, node2, files):
        def filterfiles(filters, files):
            l = [x for x in files if x in filters]

            for t in filters:
                if t and t[-1] != os.sep:
                    t += os.sep
                l += [x for x in files if x.startswith(t)]
            return l

        parity = paritygen(self.stripecount)

        def diffblock(diff, f, fn):
            yield tmpl(
                "diffblock", lines=prettyprintlines(diff), parity=parity.next(), file=f, filenode=hex(fn or nullid)
            )

        blockcount = webutil.countgen()

        def prettyprintlines(diff):
            blockno = blockcount.next()
            for lineno, l in enumerate(diff.splitlines(1)):
                if blockno == 0:
                    lineno = lineno + 1
                else:
                    lineno = "%d.%d" % (blockno, lineno + 1)
                if l.startswith("+"):
                    ltype = "difflineplus"
                elif l.startswith("-"):
                    ltype = "difflineminus"
                elif l.startswith("@"):
                    ltype = "difflineat"
                else:
                    ltype = "diffline"
                yield tmpl(ltype, line=l, lineid="l%s" % lineno, linenumber="% 8s" % lineno)

        r = self.repo
        c1 = r[node1]
        c2 = r[node2]
        date1 = util.datestr(c1.date())
        date2 = util.datestr(c2.date())

        modified, added, removed, deleted, unknown = r.status(node1, node2)[:5]
        if files:
            modified, added, removed = map(lambda x: filterfiles(files, x), (modified, added, removed))

        for f in modified:
            to = c1.filectx(f).data()
            tn = c2.filectx(f).data()
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
        for f in added:
            to = None
            tn = c2.filectx(f).data()
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
        for f in removed:
            to = c1.filectx(f).data()
            tn = None
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
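
The nested filterfiles helper keeps files that are named directly, plus files under any filter treated as a directory prefix. A self-contained rerun of that logic with made-up paths (POSIX os.sep assumed):

import os

def filterfiles(filters, files):
    # Exact matches first, then anything under a filter used as a directory.
    l = [x for x in files if x in filters]
    for t in filters:
        if t and t[-1] != os.sep:
            t += os.sep
        l += [x for x in files if x.startswith(t)]
    return l

files = ['README', 'src/main.py', 'src/util.py', 'docs/index.rst']
print(filterfiles(['README'], files))  # ['README']
print(filterfiles(['src'], files))     # ['src/main.py', 'src/util.py']
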
Example #4
def changelog(web, ctx, tmpl, shortlog = False):
    def changelist(limit=0, **map):
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            l.insert(0, {"parity": parity.next(),
                         "author": ctx.user(),
                         "parent": webutil.parents(ctx, i - 1),
                         "child": webutil.children(ctx, i + 1),
                         "changelogtag": showtags,
                         "desc": ctx.description(),
                         "date": ctx.date(),
                         "files": files,
                         "rev": i,
                         "node": hex_(n),
                         "tags": webutil.nodetagsdict(web.repo, n),
                         "inbranch": webutil.nodeinbranch(web.repo, ctx),
                         "branches": webutil.nodebranchdict(web.repo, ctx)
                        })

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    maxchanges = shortlog and web.maxshortchanges or web.maxchanges
    cl = web.repo.changelog
    count = len(cl)
    pos = ctx.rev()
    start = max(0, pos - maxchanges + 1)
    end = min(count, start + maxchanges)
    pos = end - 1
    parity = paritygen(web.stripecount, offset=start-end)

    changenav = webutil.revnav(web.repo).gen(pos, maxchanges, count)

    return tmpl(shortlog and 'shortlog' or 'changelog',
                changenav=changenav,
                node=hex_(ctx.node()),
                rev=pos, changesets=count,
                entries=lambda **x: changelist(limit=0,**x),
                latestentry=lambda **x: changelist(limit=1,**x),
                archives=web.archivelist("tip"))
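
The start/end/pos computation above selects a window of at most maxchanges revisions ending at the requested revision. A worked example of that arithmetic, with made-up numbers:

count = 100        # len(web.repo.changelog)
maxchanges = 10
pos = 42           # ctx.rev(), the requested revision

start = max(0, pos - maxchanges + 1)   # 33
end = min(count, start + maxchanges)   # 43 (exclusive)
pos = end - 1                          # 42, the newest revision shown
print(start, end, pos)                 # 33 43 42
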
Example #5
        def entries(sortcolumn="", descending=False, **map):
            def sessionvars(**map):
                fields = []
                style = self.request.get("style", None)
                if style:
                    fields.append(('style', style))

                separator = self.request.url[-1] == '?' and ';' or '?'
                for name, value in fields:
                    yield dict(name=name, value=value, separator=separator)
                    separator = ';'

            rows = []
            parity = paritygen(self.stripecount)
            (owned, viewable, public) = self.repositories()
            repos = owned.values() + viewable.values() + public.values()
            for r in repos:
                url = "/" + r.name
                d = (r.last_modified, r.last_modified_tz)

                contact = r.owner.user.nickname()
                description = r.description or ""
                row = dict(contact=contact or "unknown",
                           contact_sort=contact.upper() or "unknown",
                           name=r.name,
                           name_sort=r.name,
                           url=url,
                           description=description or "unknown",
                           description_sort=description.upper() or "unknown",
                           lastchange=d,
                           lastchange_sort=d[1]-d[0],
                           sessionvars=sessionvars,
                           archives=archivelist("tip", url))
                if (not sortcolumn
                    or (sortcolumn, descending) == self.repos_sorted):
                    # fast path for unsorted output
                    row['parity'] = parity.next()
                    yield row
                else:
                    rows.append((row["%s_sort" % sortcolumn], row))
            if rows:
                rows.sort()
                if descending:
                    rows.reverse()
                for key, row in rows:
                    row['parity'] = parity.next()
                    yield row
Example #6
def pushlogHTML(web, req, tmpl):
    """WebCommand for producing the HTML view of the pushlog."""
    query = pushlogSetup(web.repo, req)

    # these three functions are in webutil in newer hg, but not in hg 1.0
    def nodetagsdict(repo, node):
        return [{"name": i} for i in repo.nodetags(node)]

    def nodebranchdict(repo, ctx):
        branches = []
        branch = ctx.branch()
        # If this is an empty repo, ctx.node() == nullid,
        # ctx.branch() == 'default', but branchmap is
        # an empty dict. Using dict.get avoids a traceback.
        if repo.branchmap().get(branch) == ctx.node():
            branches.append({'name': branch})
        return branches

    def nodeinbranch(repo, ctx):
        branches = []
        branch = ctx.branch()
        if branch != 'default' and repo.branchmap().get(branch) != ctx.node():
            branches.append({'name': branch})
        return branches

    def changenav():
        nav = []
        numpages = int(ceil(query.totalentries / float(PUSHES_PER_PAGE)))
        start = max(1, query.page - PUSHES_PER_PAGE/2)
        end = min(numpages + 1, query.page + PUSHES_PER_PAGE/2)
        if query.page != 1:
            nav.append({'page': 1, 'label': "First"})
            nav.append({'page': query.page - 1, 'label': "Prev"})
        for i in range(start, end):
            nav.append({'page': i, 'label': str(i)})
        
        if query.page != numpages:
            nav.append({'page': query.page + 1, 'label': "Next"})
            nav.append({'page': numpages, 'label': "Last"})
        return nav

    def changelist(limit=0, **map):
        # useless fallback
        listfilediffs = lambda a,b,c: []
        if hasattr(webutil, 'listfilediffs'):
            listfilediffs = lambda a,b,c: webutil.listfilediffs(a,b,c, len(b))
        elif hasattr(web, 'listfilediffs'):
            listfilediffs = web.listfilediffs

        allentries = []
        lastid = None
        ch = None
        l = []
        mergehidden = ""
        p = 0
        currentpush = None
        for id, user, date, node in query.entries:
            if isinstance(node, unicode):
                node = node.encode('utf-8')
            ctx = web.repo.changectx(node)
            n = ctx.node()
            entry = {"author": ctx.user(),
                     "desc": ctx.description(),
                     "files": listfilediffs(tmpl, ctx.files(), n),
                     "rev": ctx.rev(),
                     "node": hex(n),
                     "parents": [c.hex() for c in ctx.parents()],
                     "tags": nodetagsdict(web.repo, n),
                     "branches": nodebranchdict(web.repo, ctx),
                     "inbranch": nodeinbranch(web.repo, ctx),
                     "hidden": "",
                     "push": [],
                     "mergerollup": [],
                     "id": id
                     }
            if id != lastid:
                lastid = id
                p = parity.next()
                entry["push"] = [{"user": user,
                                  "date": localdate(date)}]
                if len([c for c in ctx.parents() if c.node() != nullid]) > 1:
                    mergehidden = "hidden"
                    entry["mergerollup"] = [{"count": 0}]
                else:
                    mergehidden = ""
                currentpush = entry
            else:
                entry["hidden"] = mergehidden
                if mergehidden:
                    currentpush["mergerollup"][0]["count"] += 1
            entry["parity"] = p
            l.append(entry)

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    parity = paritygen(web.stripecount)

    return tmpl('pushlog',
                changenav=changenav(),
                rev=0,
                entries=lambda **x: changelist(limit=0,**x),
                latestentry=lambda **x: changelist(limit=1,**x),
                startdate='startdate' in req.form and req.form['startdate'][0] or '1 week ago',
                enddate='enddate' in req.form and req.form['enddate'][0] or 'now',
                querydescription=query.description(),
                archives=web.archivelist("tip"))
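
The changenav helper above builds a simple pager around the current page. A worked sketch of that arithmetic in Python 3 syntax (note the explicit // where the Python 2 original relies on integer division), assuming PUSHES_PER_PAGE = 10 and 35 pushes viewed from page 2:

from math import ceil

PUSHES_PER_PAGE = 10
totalentries, page = 35, 2

numpages = int(ceil(totalentries / float(PUSHES_PER_PAGE)))   # 4
start = max(1, page - PUSHES_PER_PAGE // 2)                   # 1
end = min(numpages + 1, page + PUSHES_PER_PAGE // 2)          # 5

nav = []
if page != 1:
    nav += [{'page': 1, 'label': "First"}, {'page': page - 1, 'label': "Prev"}]
nav += [{'page': i, 'label': str(i)} for i in range(start, end)]
if page != numpages:
    nav += [{'page': page + 1, 'label': "Next"}, {'page': numpages, 'label': "Last"}]

print([e['label'] for e in nav])
# ['First', 'Prev', '1', '2', '3', '4', 'Next', 'Last']
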
Example #7
def pushlogHTML(web, req, tmpl):
    """WebCommand for producing the HTML view of the pushlog."""
    query = pushlogSetup(web.repo, req)

    # these three functions are in webutil in newer hg, but not in hg 1.0
    def nodetagsdict(repo, node):
        return [{"name": i} for i in repo.nodetags(node)]

    def nodebranchdict(repo, ctx):
        branches = []
        branch = ctx.branch()
        # If this is an empty repo, ctx.node() == nullid,
        # ctx.branch() == 'default', but branchmap is
        # an empty dict. Using dict.get avoids a traceback.
        if repo.branchmap().get(branch) == ctx.node():
            branches.append({'name': branch})
        return branches

    def nodeinbranch(repo, ctx):
        branches = []
        branch = ctx.branch()
        if branch != 'default' and repo.branchmap().get(branch) != ctx.node():
            branches.append({'name': branch})
        return branches

    def changenav():
        nav = []
        numpages = int(ceil(query.totalentries / float(PUSHES_PER_PAGE)))
        start = max(1, query.page - PUSHES_PER_PAGE/2)
        end = min(numpages + 1, query.page + PUSHES_PER_PAGE/2)
        if query.page != 1:
            nav.append({'page': 1, 'label': "First"})
            nav.append({'page': query.page - 1, 'label': "Prev"})
        for i in range(start, end):
            nav.append({'page': i, 'label': str(i)})

        if query.page != numpages:
            nav.append({'page': query.page + 1, 'label': "Next"})
            nav.append({'page': numpages, 'label': "Last"})
        return nav

    def changelist(limit=0, **map):
        # useless fallback
        listfilediffs = lambda a,b,c: []
        if hasattr(webutil, 'listfilediffs'):
            listfilediffs = lambda a,b,c: webutil.listfilediffs(a,b,c, len(b))
        elif hasattr(web, 'listfilediffs'):
            listfilediffs = web.listfilediffs

        lastid = None
        l = []
        mergehidden = ""
        p = 0
        currentpush = None
        for id, user, date, node in query.entries:
            if isinstance(node, unicode):
                node = node.encode('utf-8')

            try:
                ctx = web.repo[node]
            # Changeset is hidden.
            except error.FilteredRepoLookupError:
                continue
            n = ctx.node()
            entry = {"author": ctx.user(),
                     "desc": ctx.description(),
                     "files": listfilediffs(tmpl, ctx.files(), n),
                     "rev": ctx.rev(),
                     "node": hex(n),
                     "parents": [c.hex() for c in ctx.parents()],
                     "tags": nodetagsdict(web.repo, n),
                     "branches": nodebranchdict(web.repo, ctx),
                     "inbranch": nodeinbranch(web.repo, ctx),
                     "hidden": "",
                     "push": [],
                     "mergerollup": [],
                     "id": id
                     }
            if id != lastid:
                lastid = id
                p = parity.next()
                entry["push"] = [{"user": user,
                                  "date": localdate(date)}]
                if len([c for c in ctx.parents() if c.node() != nullid]) > 1:
                    mergehidden = "hidden"
                    entry["mergerollup"] = [{"count": 0}]
                else:
                    mergehidden = ""
                currentpush = entry
            else:
                entry["hidden"] = mergehidden
                if mergehidden:
                    currentpush["mergerollup"][0]["count"] += 1
            entry["parity"] = p
            l.append(entry)

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    parity = paritygen(web.stripecount)

    return tmpl('pushlog',
                changenav=changenav(),
                rev=0,
                entries=lambda **x: changelist(limit=0,**x),
                latestentry=lambda **x: changelist(limit=1,**x),
                startdate='startdate' in req.form and req.form['startdate'][0] or '1 week ago',
                enddate='enddate' in req.form and req.form['enddate'][0] or 'now',
                querydescription=query.description(),
                archives=web.archivelist("tip"))
Example #8
def pushlog_changelist(_context, web, query, tiponly):
    '''Generator which yields entries in a changelist for the pushlog.
    '''
    parity = paritygen(web.stripecount)
    p = next(parity)

    # Filter the query entries down to those whose node is visible in the repo
    visiblequeryentries = (
        (pushid, user, date, node)
        for pushid, user, date, node in query.entries
        if scmutil.isrevsymbol(web.repo, node)
    )

    # FIFO queue. Accumulate pushes as we need to
    # count how many entries correspond with a given push
    samepush = collections.deque()

    # Get the first element of the query
    # return if there are no entries
    try:
        pushid, user, date, node = next(visiblequeryentries)

        lastid = pushid
        samepush.append(
            (pushid, user, date, node)
        )
    except StopIteration:
        return

    # Iterate over all the non-hidden entries and aggregate
    # them together per unique pushid
    for allentry in visiblequeryentries:
        pushid, user, date, node = allentry

        # If the entries both come from the same push, add to the accumulated set of entries
        if pushid == lastid:
            samepush.append(allentry)

        # Once the pushids differ, yield the result
        else:
            # If this is the first changeset for this push, put the change in the queue
            firstpush = len(samepush) == 0

            if firstpush:
                samepush.append(allentry)

            for entry in handle_entries_for_push(web, samepush, p):
                yield entry

                if tiponly:
                    return

            # Set the lastid
            lastid = pushid

            # Swap parity once we are on to processing another push
            p = next(parity)

            # Reset the aggregation of entries, as we are now processing a new push
            samepush = collections.deque()

            # If this was not the first push, the current entry needs processing
            # Add it to the queue here
            if not firstpush:
                samepush.append(allentry)

    # We don't need to display the remaining entries on the page if there are none
    if not samepush:
        return

    # Display the remaining entries for the page
    for entry in handle_entries_for_push(web, samepush, p):
        yield entry

        if tiponly:
            return
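
The deque bookkeeping above groups consecutive query entries that share a pushid before handing each group to handle_entries_for_push. The same grouping can be sketched more compactly with itertools.groupby, assuming entries arrive already grouped by pushid (as the lastid comparison in the original implies); the tuples below are made up for illustration:

import itertools

entries = [
    (1, "alice", 1000, "aaaa"),   # two changesets in push 1
    (1, "alice", 1000, "bbbb"),
    (2, "bob",   2000, "cccc"),   # one changeset in push 2
]

parity = 0
for pushid, push in itertools.groupby(entries, key=lambda e: e[0]):
    changesets = list(push)
    print(pushid, len(changesets), "changeset(s), parity", parity)
    parity = 1 - parity
# 1 2 changeset(s), parity 0
# 2 1 changeset(s), parity 1
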
Example #9
def status(web, tmpl, ctx, path, st, datefmt='isodate'):
    """\
    Based on hgweb.manifest, adapted to include features found in
    hg status.

    Initial parameters are the same as manifest.  New parameters:

    ctx
        - should be the workingctx
    st 
        - the tuple returned from repo.status
    datefmt
        - the date format of the full filelist.
    """

    changetypes = (
        'modified', 'added', 'removed', 'deleted', 'unknown', 'ignored',
        'clean',
    )
    # status listing
    statlist = dict(zip(changetypes, st))
    filestatlist = {}
    for k, v in statlist.iteritems():
        for f in v:
            filestatlist[f] = k
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for f, n in mf.items():
        if f[:l] != path:
            continue
        remain = f[l:]
        if "/" in remain:
            short = remain[:remain.index("/") + 1] # bleah
            files[short] = (f, None)
        else:
            short = os.path.basename(remain)
            files[short] = (f, n)

    def filelist(**map):
        fl = files.keys()
        fl.sort()
        for f in fl:
            full, fnode = files[f]
            if not fnode:
                continue
            fctx = ctx.filectx(full)
            yield {"file": full,
                   "status": filestatlist[full],
                   "parity": parity.next(),
                   "basename": f,
                   "date": fctx.changectx().date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full),
                   }

    def dirlist(**map):
        fl = files.keys()
        fl.sort()
        for f in fl:
            full, fnode = files[f]
            if fnode:
                continue

            yield {"parity": parity.next(),
                   "path": "%s%s" % (abspath, f),
                   "basename": f[:-1]}

    def fulllist(**map):
        for i in dirlist():
            # remove first slash
            i['file'] = i['path'][1:]
            i['permissions'] = 'drwxr-xr-x'
            yield i
        for i in filelist():
            i['date'] = utils.filter(i['date'], datefmt)
            i['permissions'] = utils.filter(i['permissions'], 'permissions')
            yield i

    return tmpl("status",
                 rev=ctx.rev(),
                 node=hex_(node),
                 path=abspath,
                 up=webutil.up(abspath),
                 upparity=parity.next(),
                 fentries=filelist,
                 dentries=dirlist,
                 aentries=fulllist,
                 archives=[], # web.archivelist(hex_(node)),
                 tags=webutil.nodetagsdict(web.repo, ctx),
                 branches=webutil.nodebranchdict(web.repo, ctx))
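
The statlist/filestatlist step inverts the repo.status tuple into a per-file status lookup. A self-contained sketch of that mapping in Python 3 syntax, with made-up file names:

changetypes = ('modified', 'added', 'removed', 'deleted', 'unknown',
               'ignored', 'clean')
# A made-up status tuple in the same order as the changetypes above.
st = (['a.txt'], ['b.txt'], [], [], ['junk.tmp'], [], ['lib.py'])

statlist = dict(zip(changetypes, st))
filestatlist = {f: k for k, v in statlist.items() for f in v}
print(filestatlist)
# {'a.txt': 'modified', 'b.txt': 'added', 'junk.tmp': 'unknown', 'lib.py': 'clean'}
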