示例#1
0
def recentchanges(rend, args):
    """List recently changed pages. First argument is how many to cut
	the list at, default 50; 0 means no limit, showing everything.
	Additional arguments are which directories to include or (with a dash
	at the start) to exclude from the list; you can use '.' to mean
	the current directory. To preserve the default limit, use a
	value for the first argument that is not a number.
	If we're Striped, list pages under their name not their full path."""

    rend.markComplex()

    # Fix up and handle arguments.
    # NOTE(review): recentArgsFixer presumably normalizes args in place
    # and returns the entry-count cutoff -- confirm against its source.
    cutoff = recentArgsFixer(rend, args)

    # Start the descendant walk at the common path prefix of all the
    # include/exclude arguments, to keep the walk as small as possible.
    # For the especially perverse: if the starting path is not a
    # directory, we walk everything. Ha ha you lose.
    startpath = reduce(common_pref_reduce, args[1:], None)
    if startpath is None:
        startpath = ''
    else:
        npage = rend.mod.get_page(startpath)
        if npage.type != "dir":
            startpath = ""

    # Walk root downards, yes this bites.
    # It does give us the entire page list.
    # (cache_page_children() serves the (timestamp, path) list from
    # cache when it can, instead of re-walking the tree.)
    #rl = rend.mod.get_page(startpath).descendants(rend.ctx)
    rl = rend.ctx.cache_page_children(rend.mod.get_page(startpath))
    if len(args) > 1:
        # Keep only the pages that match the include/exclude arguments.
        rl = [x for x in rl if file_matches_args(x[1], args[1:])]
    rl = list(rl)
    utils.sort_timelist(rl)

    # We'll show them all if you *really* want. After all,
    # we already took the hit to generate all the data.
    if cutoff > 0:
        # Take the first 'cutoff' entries that are not redirects;
        # redirect pages are not interesting in a change listing.
        nrl = []
        while rl and len(nrl) < cutoff:
            t = rl.pop(0)
            tp = rend.mod.get_page(t[1])
            if not tp.is_redirect():
                nrl.append(t)
        rl = nrl

    if rl:
        # Update the timestamp, to show off:
        # (after sorting, rl[0][0] is the newest entry's timestamp.)
        rend.ctx.newtime(rl[0][0])
        rl = [z[1] for z in rl]

        # We list with full names unless we're in short display
        # more.
        if rend.useLists:
            pagelist_paths(rend, rl)
        else:
            pagelist_names(rend, rl)
    elif not rend.useLists:
        rend.addPiece("(none)")
    return True
示例#2
0
def recentchanges(rend, args):
	"""List recently changed pages. First argument is how many to cut
	the list at, default 50; 0 means no limit, showing everything.
	Additional arguments are which directories to include or (with a dash
	at the start) to exclude from the list; you can use '.' to mean
	the current directory. To preserve the default limit, use a
	value for the first argument that is not a number.
	If we're Striped, list pages under their name not their full path."""

	rend.markComplex()

	# Normalize the arguments and extract the entry cutoff.
	cutoff = recentArgsFixer(rend, args)

	# Start the walk at the common prefix of all path arguments.
	# If that prefix turns out not to be a directory, fall back to
	# walking everything from the root. Ha ha you lose.
	prefix = reduce(common_pref_reduce, args[1:], None)
	if prefix is None:
		prefix = ''
	else:
		npage = rend.mod.get_page(prefix)
		if npage.type != "dir":
			prefix = ""

	# Pull the entire (timestamp, path) descendant list, preferring
	# the cache over a live walk of the tree.
	#pages = rend.mod.get_page(prefix).descendants(rend.ctx)
	pages = rend.ctx.cache_page_children(rend.mod.get_page(prefix))
	if len(args) > 1:
		pages = [e for e in pages if file_matches_args(e[1], args[1:])]
	pages = list(pages)
	utils.sort_timelist(pages)

	# A cutoff of zero means show everything; we already took the
	# hit of generating the full list anyway. Otherwise keep the
	# first 'cutoff' entries that are not redirects.
	if cutoff > 0:
		kept = []
		for entry in pages:
			if len(kept) >= cutoff:
				break
			if not rend.mod.get_page(entry[1]).is_redirect():
				kept.append(entry)
		pages = kept

	if pages:
		# Advertise the newest change time.
		rend.ctx.newtime(pages[0][0])
		paths = [e[1] for e in pages]
		# Full paths in list mode, bare names otherwise.
		if rend.useLists:
			pagelist_paths(rend, paths)
		else:
			pagelist_names(rend, paths)
	elif not rend.useLists:
		rend.addPiece("(none)")
	return True
示例#3
0
File: atomgen.py  Project: xakon/dwiki
def _fillpages(context):
    """Return the (timestamp, path) entries for the Atom page feed,
    newest first, capped at the cutpoint and filtered, caching the
    result under atom_cachekey."""
    cached = context.getcache(atom_cachekey)
    if cached is not None:
        return cached
    # Only directories have descendants to feed.
    if context.page.type != "dir":
        return []
    cutpoint = get_cutpoint(context)
    cuttime = get_cuttime(context)

    #dl = context.page.descendants(context)
    # We deliberately use this context routine because it uses the
    # disk cache (if that exists). List-ize the result so we can
    # sort it.
    kids = list(context.cache_page_children(context.page))
    utils.sort_timelist(kids)
    if not kids:
        context.setcache(atom_cachekey, [])
        return []

    picked = []
    seen = {}
    for entry in kids:
        # Stop once we have cutpoint entries.
        if len(picked) >= cutpoint:
            break
        # Drop pages older than our cut time.
        if entry[0] < cuttime:
            continue
        page = context.model.get_page(entry[1])
        # We explicitly don't check access permissions here,
        # because what to show for forbidden pages is a policy
        # decision that is inappropriate to make here.
        if page.is_util() or page.is_redirect() or not page.displayable():
            continue
        # Suppress duplicate pages; these might happen through,
        # eg, hardlinks. When this happens we put only the first
        # one encountered in the Atom feed. Our sorting process
        # means that this is the lexically first, which may not
        # actually be the same one that was in the *last* Atom
        # feed generation run, but that's life.
        # Tricky issue: we assume that all versions of the page
        # have the same access permissions. If they don't, this
        # may suppress readable pages in favour of earlier
        # unreadable ones.
        ident = page.identity()
        if ident in seen:
            continue
        seen[ident] = True
        picked.append(entry)
    context.setcache(atom_cachekey, picked)
    return picked
示例#4
0
def _fillpages(context):
	"""Return the (timestamp, path) entries for the Atom page feed,
	newest first, capped at the cutpoint and filtered; the result is
	cached under atom_cachekey."""
	r = context.getcache(atom_cachekey)
	if r is not None:
		return r
	# Only directories have descendants to put in a feed.
	if context.page.type != "dir":
		return []
	cutpoint = get_cutpoint(context)
	cuttime = get_cuttime(context)

	#dl = context.page.descendants(context)
	# We deliberately use this context routine because it uses the
	# disk cache (if that exists).
	dl = context.cache_page_children(context.page)

	# Force the generator to be expanded to a full list so we can use
	# .sort on it.
	dl = list(dl)
	utils.sort_timelist(dl)
	if not dl:
		context.setcache(atom_cachekey, [])
		return []

	res = []
	count = 0
	dupDict = {}
	for ent in dl:
		# Stop once we have cutpoint entries.
		if count >= cutpoint:
			break
		# Drop pages older than our cut time.
		if ent[0] < cuttime:
			continue
		np = context.model.get_page(ent[1])
		# We explicitly don't check access permissions here,
		# because what to show for forbidden pages is a policy
		# decision that is inappropriate to make here.
		if np.is_util() or np.is_redirect() or not np.displayable():
			continue
		# Suppress duplicate pages; these might happen through,
		# eg, hardlinks. When this happens we put only the first
		# one encountered in the Atom feed. Our sorting process
		# means that this is the lexically first, which may not
		# actually be the same one that was in the *last* Atom
		# feed generation run, but that's life.
		# Tricky issue: we assume that all versions of the page
		# have the same access permissions. If they don't, this
		# may suppress readable pages in favour of earlier
		# unreadable ones.
		pageid = np.identity()
		if pageid in dupDict:
			continue
		else:
			dupDict[pageid] = True
		count += 1
		res.append(ent)
	context.setcache(atom_cachekey, res)
	return res
示例#5
0
File: atomgen.py  Project: xakon/dwiki
def _fillcomments(context):
    """Return the (timestamp, path, commentname, comment) entries for
    the Atom comment feed, newest first, capped at the cutpoint; the
    result is cached under atom_comkey."""
    r = context.getcache(atom_comkey)
    if r is not None:
        return r
    cutpoint = get_cutpoint(context)

    #dl = context.model.comments_children(context.page.me())
    dl = comments.cached_comments_children(context, context.page.me())
    # Force the generator to be expanded to a full list, so we can
    # sort it.
    dl = list(dl)
    utils.sort_timelist(dl)
    if not dl:
        context.setcache(atom_comkey, [])
        return []

    # Virtualization of comments means that we restrict the pages
    # that the comments are on to be within the virtualization
    # range. We cannot simply use pageranges.filter_files() on
    # the comments list itself, because the timestamps in that
    # are the *comment* timestamps, not the *page* timestamps.
    filterComments = False
    filterD = {}
    if pageranges.is_restriction(context):
        filterComments = True
        for ts, p in context.page.descendants(context):
            filterD[p] = True

    res = []
    count = 0
    for ts, path, cname in dl:
        # BUGFIX: this was 'count > cutpoint', which let one entry
        # too many into the feed; '>=' caps the result at exactly
        # cutpoint entries, matching _fillpages().
        if count >= cutpoint:
            break
        if filterComments and path not in filterD:
            continue
        np = context.model.get_page(path)
        # We drop entirely pages that can't be accessed with
        # the current (lack of) permissions, rather than
        # insert a message about denied content; this seems
        # better.
        if not np.displayable() or np.is_redirect() or \
           not np.access_ok(context):
            continue
        c = context.model.get_comment(np, cname)
        if not c:
            continue
        count += 1
        res.append((ts, path, cname, c))
    context.setcache(atom_comkey, res)
    return res
示例#6
0
def _fillcomments(context):
	"""Return the (timestamp, path, commentname, comment) entries for
	the Atom comment feed, newest first, capped at the cutpoint; the
	result is cached under atom_comkey."""
	r = context.getcache(atom_comkey)
	if r is not None:
		return r
	cutpoint = get_cutpoint(context)

	#dl = context.model.comments_children(context.page.me())
	dl = comments.cached_comments_children(context, context.page.me())
	# Force the generator to be expanded to a full list, so we can
	# sort it.
	dl = list(dl)
	utils.sort_timelist(dl)
	if not dl:
		context.setcache(atom_comkey, [])
		return []

	# Virtualization of comments means that we restrict the pages
	# that the comments are on to be within the virtualization
	# range. We cannot simply use pageranges.filter_files() on
	# the comments list itself, because the timestamps in that
	# are the *comment* timestamps, not the *page* timestamps.
	filterComments = False
	filterD = {}
	if pageranges.is_restriction(context):
		filterComments = True
		for ts, p in context.page.descendants(context):
			filterD[p] = True

	res = []
	count = 0
	for ts, path, cname in dl:
		# BUGFIX: this was 'count > cutpoint', which let one entry
		# too many into the feed; '>=' caps the result at exactly
		# cutpoint entries, matching _fillpages().
		if count >= cutpoint:
			break
		if filterComments and path not in filterD:
			continue
		np = context.model.get_page(path)
		# We drop entirely pages that can't be accessed with
		# the current (lack of) permissions, rather than
		# insert a message about denied content; this seems
		# better.
		if not np.displayable() or np.is_redirect() or \
		   not np.access_ok(context):
			continue
		c = context.model.get_comment(np, cname)
		if not c:
			continue
		count += 1
		res.append((ts, path, cname, c))
	context.setcache(atom_comkey, res)
	return res
示例#7
0
def filter_files(context, flist):
	"""Restrict a (timestamp, ...) file list according to the
	virtualization restriction on the context, if any. Returns the
	(possibly) filtered list; as a side effect, stashes hit data in
	the rest_hitstore context variable for later display use."""
	if not is_restriction(context):
		return flist
	rtype = context[rest_type]
	rargs = context[rest_val]

	# We need flist as a real list and sorted with the most recent
	# first. We start by stashing away some data for later use.
	flist = list(flist)
	utils.sort_timelist(flist)
	context.setvar(rest_hitstore, len(flist))

	if rtype == 'latest':
		return flist[:rargs]
	elif rtype == 'oldest':
		return flist[-rargs:]
	elif rtype == 'range':
		# Python makes this all work out for us. start and
		# end are one-based.
		start, end = rargs
		return flist[start-1:end]
	elif rtype == "calendar":
		rl = []
		just_before = None
		just_later = None
		# FIXME
		# The day, month, or year may be invalid, in which case
		# the datetime.date conversion in crange_to_limits()
		# will throw a ValueError. Catching it here is a crude
		# hack.
		try:
			r1, r2 = crange_to_limits(rargs)
		except ValueError:
			context.setvar(rest_hitstore, (None, None))
			return rl
		# Collect entries inside the [r1, r2] window, remembering
		# the timestamps just outside either edge (for 'next' and
		# 'previous' navigation). Since flist is newest-first, the
		# first too-old entry ends the scan.
		for e in flist:
			r = calendar_cmp(r1, r2, e[0])
			if r > 0:
				just_before = e[0]
			elif r < 0:
				just_later = e[0]
				break
			else:
				rl.append(e)
		context.setvar(rest_hitstore, (just_before, just_later))
		flist = rl
	# ... for now
	return flist
示例#8
0
def rec_comment_pages(rend, args):
	"""List pages with recent comments. Arguments are the same as for
	RecentChanges."""
	rend.markComplex()
	cutoff = recentArgsFixer(rend, args)

	startpath = reduce(common_pref_reduce, args[1:], None)
	if startpath is None:
		startpath = ''

	spage = rend.mod.get_page(startpath)
	#cl = rend.mod.comments_children(spage)
	cl = comments.cached_comments_children(rend.ctx, spage)
	# There is no point checking cl, because it is always a generator.

	# Now we get to condense it from a list of recent comments
	# down to a list of pages with recent comments.
	d = {}
	fargs = args[1:]
	for com in cl:
		ppath = com[1]
		# BUGFIX: keep pages that *match* the filter arguments.
		# The old condition skipped matching pages, inverting
		# the include/exclude filter relative to RecentChanges
		# (which this docstring says we follow).
		if ppath in d or \
		   (fargs and not file_matches_args(ppath, fargs)):
			continue
		# NOTE(review): this records the first comment timestamp
		# seen for each page; if cached_comments_children() does
		# not yield newest-first, this may not be the page's most
		# recent comment -- confirm the generator's ordering.
		d[ppath] = com[0]
	cl = [(d[x], x) for x in d.keys()]
	utils.sort_timelist(cl)

	# Unlike RecentChanges, we know that these should be real pages.
	# (If they're not, we have problems.)
	if cutoff > 0 and cl:
		cl = cl[:cutoff]
	if cl:
		rend.ctx.newtime(cl[0][0])
		cl = [z[1] for z in cl]
		# We list with full names unless we're in short display
		# more.
		view = rend.ctx.comment_view()
		if rend.useLists:
			pagelist_paths(rend, cl, view)
		else:
			pagelist_names(rend, cl, view)
	elif not rend.useLists:
		rend.addPiece("(none)")
	return True
示例#9
0
def rec_comment_pages(rend, args):
    """List pages with recent comments. Arguments are the same as for
	RecentChanges."""
    rend.markComplex()
    cutoff = recentArgsFixer(rend, args)

    startpath = reduce(common_pref_reduce, args[1:], None)
    if startpath is None:
        startpath = ''

    spage = rend.mod.get_page(startpath)
    #cl = rend.mod.comments_children(spage)
    cl = comments.cached_comments_children(rend.ctx, spage)
    # There is no point checking cl, because it is always a generator.

    # Now we get to condense it from a list of recent comments
    # down to a list of pages with recent comments.
    d = {}
    fargs = args[1:]
    for com in cl:
        ppath = com[1]
        # BUGFIX: keep pages that *match* the filter arguments.
        # The old condition skipped matching pages, inverting
        # the include/exclude filter relative to RecentChanges
        # (which this docstring says we follow).
        if ppath in d or \
           (fargs and not file_matches_args(ppath, fargs)):
            continue
        # NOTE(review): this records the first comment timestamp
        # seen for each page; if cached_comments_children() does
        # not yield newest-first, this may not be the page's most
        # recent comment -- confirm the generator's ordering.
        d[ppath] = com[0]
    cl = [(d[x], x) for x in d.keys()]
    utils.sort_timelist(cl)

    # Unlike RecentChanges, we know that these should be real pages.
    # (If they're not, we have problems.)
    if cutoff > 0 and cl:
        cl = cl[:cutoff]
    if cl:
        rend.ctx.newtime(cl[0][0])
        cl = [z[1] for z in cl]
        # We list with full names unless we're in short display
        # more.
        view = rend.ctx.comment_view()
        if rend.useLists:
            pagelist_paths(rend, cl, view)
        else:
            pagelist_names(rend, cl, view)
    elif not rend.useLists:
        rend.addPiece("(none)")
    return True
示例#10
0
	def cache_page_children(self, page):
		"""Return the sorted (timestamp, path) descendant list of
		page, consulting the in-memory cache first, then the disk
		cache, and only then doing a real descendants() walk."""
		key = ("pagekids", page.path)
		# In-memory cache first; a hit needs no further work.
		kids = self.getcache(key)
		if kids is not None:
			return kids
		# Then the general disk cache; on a hit we must still
		# load the in-memory cache below.
		kids = self._get_disk_cpc(page)
		if kids is None:
			# Full miss: do the real walk. descendants() may
			# hand back an iterator, which is absolutely no
			# good to cache, so force it into a list, and be
			# sure to sort it before storing it anywhere.
			kids = list(page.descendants(self))
			utils.sort_timelist(kids)
			self._set_disk_cpc(page, kids)
		self.setcache(key, kids)
		return kids
示例#11
0
File: context.py  Project: xakon/dwiki
 def cache_page_children(self, page):
     """Return the sorted (timestamp, path) descendant list of page,
     consulting the in-memory cache first, then the disk cache, and
     only then doing a real descendants() walk."""
     rp = page.path
     res = self.getcache(("pagekids", rp))
     if res is not None:
         return res
     res = self._get_disk_cpc(page)
     if res is not None:
         # If the general disk cache hit, we must load
         # the in-memory cache.
         self.setcache(("pagekids", rp), res)
         return res
     # Full miss. Go to all the work.
     #
     # descendants() may return an iterator, which is
     # absolutely no good to cache. So we must list-ize
     # it, no matter how annoying that is.
     res = list(page.descendants(self))
     # To be sure we sort it before we store it.
     utils.sort_timelist(res)
     # Store in both the in-memory and the disk cache for next time.
     self.setcache(("pagekids", rp), res)
     self._set_disk_cpc(page, res)
     return res
示例#12
0
def recentcomments(rend, args):
	"""List recent comments. Arguments are the same as for
	RecentChanges. Use with _Striped_ is somewhat dubious."""
	rend.markComplex()
	cutoff = recentArgsFixer(rend, args)
	
	startpath = reduce(common_pref_reduce, args[1:], None)
	if startpath is None:
		startpath = ''

	spage = rend.mod.get_page(startpath)
	#cl = rend.mod.comments_children(spage)
	cl = comments.cached_comments_children(rend.ctx, spage)
	# There is no point checking cl, because it is always a generator.

	if len(args) > 1:
		fargs = args[1:]
		cl = [x for x in cl if file_matches_args(x[1], fargs)]
	cl = list(cl)
	utils.sort_timelist(cl)
	if cutoff > 0:
		cl = cl[:cutoff]
	if not cl:
		if not rend.useLists:
			rend.addPiece("(none)")
		return True

	view = rend.ctx.comment_view()
	rend.ctx.newtime(cl[0][0])

	def _cominfo(ppath, cname):
		npage = rend.mod.get_page(ppath)
		c = rend.mod.get_comment(npage, cname)
		if not c:
			return (None, None, None)
		url = rend.ctx.url(npage, view)
		ca = comments.anchor_for(c)
		url += '#%s' % ca
		return (c, npage, url, ca)

	def _lif((ts, ppath, cname)):
		(c, npage, url, ca) = _cominfo(ppath, cname)
		if not c:
			return
		rend.addPiece('<a href="%s">' % url)
		if c.username and not c.is_anon(rend.ctx):
			rend.text("%s (%s)" % (c.username, c.user), "none")
		elif not c.is_anon(rend.ctx):
			rend.text(c.user, "none")
		elif c.username:
			rend.text(c.username, "none")
		else:
			rend.text(c.ip, "none")
	
		tstr = time.strftime("%Y-%m-%d %H:%M",
				     time.localtime(c.time))
		rend.addPiece(" at "+tstr)
		rend.addPiece("</a>, on ")
		url2 = rend.ctx.url(npage, view)
		rend.makelink(url2, npage.name)
		rend.addPiece("\n")
	def _bf((ts, ppath, cname)):
		(c, npage, url, ca) = _cominfo(ppath, cname)
		if not c:
			return
		rend.addPiece('<a href="%s">' % url)
		rend.text(ca, "none")
		rend.addPiece("</a>")
	rend.macro_list(_lif, _bf, cl)
	return True
示例#13
0
def recentcomments(rend, args):
    """List recent comments. Arguments are the same as for
	RecentChanges. Use with _Striped_ is somewhat dubious."""
    rend.markComplex()
    cutoff = recentArgsFixer(rend, args)

    startpath = reduce(common_pref_reduce, args[1:], None)
    if startpath is None:
        startpath = ''

    spage = rend.mod.get_page(startpath)
    #cl = rend.mod.comments_children(spage)
    cl = comments.cached_comments_children(rend.ctx, spage)
    # There is no point checking cl, because it is always a generator.

    if len(args) > 1:
        fargs = args[1:]
        cl = [x for x in cl if file_matches_args(x[1], fargs)]
    cl = list(cl)
    utils.sort_timelist(cl)
    if cutoff > 0:
        cl = cl[:cutoff]
    if not cl:
        if not rend.useLists:
            rend.addPiece("(none)")
        return True

    view = rend.ctx.comment_view()
    rend.ctx.newtime(cl[0][0])

    def _cominfo(ppath, cname):
        npage = rend.mod.get_page(ppath)
        c = rend.mod.get_comment(npage, cname)
        if not c:
            return (None, None, None)
        url = rend.ctx.url(npage, view)
        ca = comments.anchor_for(c)
        url += '#%s' % ca
        return (c, npage, url, ca)

    def _lif((ts, ppath, cname)):
        (c, npage, url, ca) = _cominfo(ppath, cname)
        if not c:
            return
        rend.addPiece('<a href="%s">' % url)
        if c.username and not c.is_anon(rend.ctx):
            rend.text("%s (%s)" % (c.username, c.user), "none")
        elif not c.is_anon(rend.ctx):
            rend.text(c.user, "none")
        elif c.username:
            rend.text(c.username, "none")
        else:
            rend.text(c.ip, "none")

        tstr = time.strftime("%Y-%m-%d %H:%M", time.localtime(c.time))
        rend.addPiece(" at " + tstr)
        rend.addPiece("</a>, on ")
        url2 = rend.ctx.url(npage, view)
        rend.makelink(url2, npage.name)
        rend.addPiece("\n")

    def _bf((ts, ppath, cname)):
        (c, npage, url, ca) = _cominfo(ppath, cname)
        if not c:
            return
        rend.addPiece('<a href="%s">' % url)
        rend.text(ca, "none")
        rend.addPiece("</a>")

    rend.macro_list(_lif, _bf, cl)
    return True