def testgetUnicodeIndexGroup():
    # a list of names maps to the index group of its first entry, uppercased
    assert wikiutil.getUnicodeIndexGroup(['moin-2', 'MoinMoin']) == 'MOIN-2'
    # an empty sequence has no first character to group by
    with pytest.raises(IndexError):
        wikiutil.getUnicodeIndexGroup('')
def _macro_WordIndex(self, args):
    """Render a WordIndex: pages listed under each word of their names.

    Every readable page name is split into CamelCase word fragments
    (an uppercase char followed by lowercase chars); under each fragment
    the pages containing it are listed, grouped by index letter.

    @param args: macro arguments (unused here)
    @rtype: unicode
    @return: HTML for the index key followed by the index body
    """
    _ = self._
    allpages = int(self.form.get('allpages', [0])[0]) != 0
    # Get page list readable by current user
    # Filter by isSystemPage if needed
    if allpages:
        # TODO: make this fast by caching full page list
        pages = self.request.rootpage.getPageList()
    else:
        # renamed from `filter` to avoid shadowing the builtin
        def nosyspage(name):
            return not wikiutil.isSystemPage(self.request, name)
        pages = self.request.rootpage.getPageList(filter=nosyspage)

    # map each word fragment to the pages whose names contain it
    # (renamed from `map` to avoid shadowing the builtin)
    wordmap = {}
    word_re = re.compile(u'[%s][%s]+' % (config.chars_upper, config.chars_lower), re.UNICODE)
    for name in pages:
        for word in word_re.findall(name):
            entry = wordmap.setdefault(word, [])
            if name not in entry:
                entry.append(name)
    all_words = sorted(wordmap)

    index_letters = []
    current_letter = None
    html = []
    for word in all_words:
        letter = wikiutil.getUnicodeIndexGroup(word)
        if letter != current_letter:
            #html.append(self.formatter.anchordef()) # XXX no text param available!
            html.append(u'<a name="%s"><h3>%s</h3></a>' % (
                wikiutil.quoteWikinameURL(letter), letter.replace('~', 'Others')))
            current_letter = letter
        if letter not in index_letters:
            index_letters.append(letter)
        html.append(self.formatter.strong(1))
        html.append(word)
        html.append(self.formatter.strong(0))
        html.append(self.formatter.bullet_list(1))
        links = wordmap[word]
        links.sort()
        last_page = None
        for name in links:
            if name == last_page:
                continue
            last_page = name  # fix: remember last emitted page so the dedup check works
            html.append(self.formatter.listitem(1))
            html.append(Page(self.request, name).link_to(self.request))
            html.append(self.formatter.listitem(0))
        html.append(self.formatter.bullet_list(0))

    qpagename = wikiutil.quoteWikinameURL(self.formatter.page.page_name)
    index = _make_index_key(index_letters, u"""<br>
<a href="%s?allpages=%d">%s</a>
""" % (qpagename, not allpages,
       (_('Include system pages'), _('Exclude system pages'))[allpages]))
    return u'%s%s' % (index, u''.join(html))
def _macro_TitleIndex(self, args):
    """Render a TitleIndex: all readable page names, grouped by index letter.

    Page names are sorted case-insensitively; the 'allpages' form value
    toggles inclusion of system pages.

    @param args: macro arguments (unused here)
    @rtype: unicode
    @return: HTML for the index key followed by the index body
    """
    _ = self._
    html = []
    index_letters = []
    allpages = int(self.form.get('allpages', [0])[0]) != 0
    # Get page list readable by current user
    # Filter by isSystemPage if needed
    if allpages:
        # TODO: make this fast by caching full page list
        pages = self.request.rootpage.getPageList()
    else:
        # renamed from `filter` to avoid shadowing the builtin
        def nosyspage(name):
            return not wikiutil.isSystemPage(self.request, name)
        pages = self.request.rootpage.getPageList(filter=nosyspage)
    # Sort ignoring case
    tmp = [(name.upper(), name) for name in pages]
    tmp.sort()
    pages = [item[1] for item in tmp]

    current_letter = None
    for name in pages:
        # fix: removed dead `letter = name[0].upper()` that was immediately
        # overwritten by the getUnicodeIndexGroup() result
        letter = wikiutil.getUnicodeIndexGroup(name)
        if letter not in index_letters:
            index_letters.append(letter)
        if letter != current_letter:
            html.append(u'<a name="%s"><h3>%s</h3></a>' % (
                wikiutil.quoteWikinameURL(letter), letter.replace('~', 'Others')))
            current_letter = letter
        else:
            html.append(u'<br>')
        html.append(u'%s\n' % Page(self.request, name).link_to(self.request, attachment_indicator=1))

    # rss link: removed unreachable `if 0:` branch (wikixml support is not
    # implemented); `index` stays an empty prefix until that feature exists
    index = ''
    qpagename = wikiutil.quoteWikinameURL(self.formatter.page.page_name)
    index = index + _make_index_key(index_letters, u"""<br>
<a href="%s?allpages=%d">%s</a> |
<a href="%s?action=titleindex">%s</a> |
<a href="%s?action=titleindex&mimetype=text/xml">%s</a>
""" % (qpagename, not allpages,
       (_('Include system pages'), _('Exclude system pages'))[allpages],
       qpagename, _('Plain title index'),
       qpagename, _('XML title index')))
    return u'%s%s' % (index, u''.join(html))
def _make_index(self, word_re=u'.+'):
    """ make an index page (used for TitleIndex and WordIndex macro)

        word_re is a regex used for splitting a pagename into
        fragments matched by it (used for WordIndex). For TitleIndex,
        we just match the whole page name, so we only get one fragment
        that is the same as the pagename.

        TODO: _make_index could get a macro on its own, more powerful / less
        special than WordIndex and TitleIndex. It should be able to filter
        for specific mimetypes, maybe match pagenames by regex (replace
        PageList?), etc.
    """
    _ = self._
    request = self.request
    fmt = self.formatter
    allpages = int(request.values.get('allpages', 0)) != 0
    # Get page list readable by current user, filter by isSystemPage if needed
    if allpages:
        pages = request.rootpage.getPageList()
    else:
        def nosyspage(name):
            return not wikiutil.isSystemPage(request, name)
        pages = request.rootpage.getPageList(filter=nosyspage)

    word_re = re.compile(word_re, re.UNICODE)
    # map each name fragment ("word") to the pages whose names contain it
    wordmap = {}
    for name in pages:
        for word in word_re.findall(name):
            try:
                if not wordmap[word].count(name):
                    wordmap[word].append(name)
            except KeyError:
                wordmap[word] = [name]

    # Sort ignoring case
    tmp = [(word.upper(), word) for word in wordmap]
    tmp.sort()
    all_words = [item[1] for item in tmp]

    index_letters = []
    current_letter = None
    output = []
    for word in all_words:
        letter = wikiutil.getUnicodeIndexGroup(word)
        if letter != current_letter:
            # start a new letter group with an anchor + heading
            anchor = "idx-%s" % letter
            output.append(fmt.anchordef(anchor))
            output.append(fmt.heading(1, 2))
            output.append(fmt.text(letter.replace('~', 'Others')))
            output.append(fmt.heading(0, 2))
            current_letter = letter
        if letter not in index_letters:
            index_letters.append(letter)
        links = wordmap[word]
        if links and links[0] != word:  # show word fragment as on WordIndex
            output.append(fmt.strong(1))
            output.append(word)
            output.append(fmt.strong(0))

        output.append(fmt.bullet_list(1))
        links.sort()
        last_page = None
        for name in links:
            if name == last_page:
                continue
            last_page = name  # fix: remember last emitted page so the dedup check works
            output.append(fmt.listitem(1))
            output.append(Page(request, name).link_to(request, attachment_indicator=1))
            output.append(fmt.listitem(0))
        output.append(fmt.bullet_list(0))

    def _make_index_key(index_letters):
        # render the per-letter jump links ("A | B | C ...")
        index_letters.sort()
        def letter_link(ch):
            anchor = "idx-%s" % ch
            return fmt.anchorlink(1, anchor) + fmt.text(ch.replace('~', 'Others')) + fmt.anchorlink(0)
        links = [letter_link(letter) for letter in index_letters]
        return ' | '.join(links)

    page = fmt.page
    allpages_txt = (_('Include system pages'), _('Exclude system pages'))[allpages]
    # toggle link flips the current allpages state
    allpages_url = page.url(request, querystr={'allpages': allpages and '0' or '1'})

    output = [
        fmt.paragraph(1),
        _make_index_key(index_letters),
        fmt.linebreak(0),
        fmt.url(1, allpages_url),
        fmt.text(allpages_txt),
        fmt.url(0),
        fmt.paragraph(0),
    ] + output
    return u''.join(output)
def _make_index(self, word_re=u".+"):
    """ make an index page (used for TitleIndex and WordIndex macro)

        word_re is a regex used for splitting a pagename into
        fragments matched by it (used for WordIndex). For TitleIndex,
        we just match the whole page name, so we only get one fragment
        that is the same as the pagename.

        TODO: _make_index could get a macro on its own, more powerful / less
        special than WordIndex and TitleIndex. It should be able to filter
        for specific mimetypes, maybe match pagenames by regex (replace
        PageList?), etc.
    """
    _ = self._
    request = self.request
    fmt = self.formatter
    allpages = int(request.values.get("allpages", 0)) != 0
    # Get page list readable by current user, filter by isSystemPage if needed
    if allpages:
        pages = request.rootpage.getPageList()
    else:
        def nosyspage(name):
            return not wikiutil.isSystemPage(request, name)
        pages = request.rootpage.getPageList(filter=nosyspage)

    word_re = re.compile(word_re, re.UNICODE)
    # map each name fragment ("word") to the pages whose names contain it
    wordmap = {}
    for name in pages:
        for word in word_re.findall(name):
            try:
                if not wordmap[word].count(name):
                    wordmap[word].append(name)
            except KeyError:
                wordmap[word] = [name]

    # Sort ignoring case
    tmp = [(word.upper(), word) for word in wordmap]
    tmp.sort()
    all_words = [item[1] for item in tmp]

    index_letters = []
    current_letter = None
    output = []
    for word in all_words:
        letter = wikiutil.getUnicodeIndexGroup(word)
        if letter != current_letter:
            # start a new letter group with an anchor + heading
            anchor = "idx-%s" % letter
            output.append(fmt.anchordef(anchor))
            output.append(fmt.heading(1, 2))
            output.append(fmt.text(letter.replace("~", "Others")))
            output.append(fmt.heading(0, 2))
            current_letter = letter
        if letter not in index_letters:
            index_letters.append(letter)
        links = wordmap[word]
        if links and links[0] != word:  # show word fragment as on WordIndex
            output.append(fmt.strong(1))
            output.append(word)
            output.append(fmt.strong(0))

        output.append(fmt.bullet_list(1))
        links.sort()
        last_page = None
        for name in links:
            if name == last_page:
                continue
            last_page = name  # fix: remember last emitted page so the dedup check works
            output.append(fmt.listitem(1))
            output.append(Page(request, name).link_to(request, attachment_indicator=1))
            output.append(fmt.listitem(0))
        output.append(fmt.bullet_list(0))

    def _make_index_key(index_letters):
        # render the per-letter jump links ("A | B | C ...")
        index_letters.sort()
        def letter_link(ch):
            anchor = "idx-%s" % ch
            return fmt.anchorlink(1, anchor) + fmt.text(ch.replace("~", "Others")) + fmt.anchorlink(0)
        links = [letter_link(letter) for letter in index_letters]
        return " | ".join(links)

    page = fmt.page
    allpages_txt = (_("Include system pages"), _("Exclude system pages"))[allpages]
    # toggle link flips the current allpages state
    allpages_url = page.url(request, querystr={"allpages": allpages and "0" or "1"})

    output = [
        fmt.paragraph(1),
        _make_index_key(index_letters),
        fmt.linebreak(0),
        fmt.url(1, allpages_url),
        fmt.text(allpages_txt),
        fmt.url(0),
        fmt.paragraph(0),
    ] + output
    return u"".join(output)