Beispiel #1
0
        def cook_chunks():
            # WIKILINK.split with a capturing group alternates literal text
            # (even indexes) with wiki-link text (odd indexes).
            for index, chunk in enumerate(WIKILINK.split(page.text)):
                if index % 2 == 0:
                    # Literal text passes through untouched.
                    yield chunk
                    continue

                stripped = extract_text_from_html(chunk)
                match = None
                for name, title in pages:
                    if _eq_loose(title, stripped):
                        match = name
                        break
                if match is None:
                    # No page with a matching title; emit the raw chunk.
                    yield chunk
                else:
                    yield '<a href="%s">%s</a>' % (match + '.html', chunk)
Beispiel #2
0
        def cook_chunks():
            # Pieces alternate: plain text at even positions, wiki-link text
            # at odd positions (a property of re.split with a capture group).
            for position, piece in enumerate(WIKILINK.split(page.text)):
                if position % 2 == 1:
                    target = extract_text_from_html(piece)
                    hit = next(
                        (name for name, title in pages
                         if _eq_loose(title, target)),
                        None)
                    if hit is not None:
                        yield '<a href="%s">%s</a>' % (hit + '.html', piece)
                        continue
                # Plain text, or a link whose title matched no page.
                yield piece
Beispiel #3
0
def evolve(site):
    """
    Strip html from every wiki page title so links keep resolving once
    title matching starts operating on the stripped form (the change this
    evolution step accompanies).
    """
    catalog = find_catalog(site)
    search = ICatalogSearch(site)
    count, docids, resolver = search(interfaces=[IWikiPage])
    for docid in docids:
        page = resolver(docid)
        stripped = extract_text_from_html(page.title)
        if page.title == stripped:
            # Title already clean; nothing to reindex.
            continue
        print("Updating title for %s" % model_path(page))
        page.title = stripped
        catalog.reindex_doc(page.docid, page)
Beispiel #4
0
def evolve(site):
    """
    Normalize existing wiki page titles by stripping html, so wiki links
    stay functional after the title-matching change that strips html from
    link text before comparison.
    """
    catalog = find_catalog(site)
    _, docids, resolver = ICatalogSearch(site)(interfaces=[IWikiPage])
    for docid in docids:
        page = resolver(docid)
        plain = extract_text_from_html(page.title)
        if page.title != plain:
            print("Updating title for %s" % resource_path(page))
            page.title = plain
            # Reindex so catalog searches see the cleaned title.
            catalog.reindex_doc(page.docid, page)
Beispiel #5
0
    def cook(self, request):
        """Render self.text with wiki-link chunks replaced by html anchors."""
        pieces = pattern.split(self.text)
        if len(pieces) == 1:
            # No wiki links at all: return the text untouched (fastpath).
            return self.text

        rendered = []
        # Chunks at odd indexes are the captured wiki-link text.
        for link_text in pieces[1::2]:
            title = extract_text_from_html(link_text)
            target = None
            for candidate in self.__parent__.values():
                if _eq_loose(candidate.title, title):
                    target = candidate
                    break
            if target is not None:
                rendered.append(
                    WIKI_LINK % (resource_url(target, request), link_text))
            else:
                # No matching page: link to the add-page form instead.
                quoted = urllib.quote(title.encode('UTF-8'))
                rendered.append(ADD_WIKIPAGE_LINK % (link_text, quoted))

        # Interleave the literal chunks with the rendered links.
        return u''.join(_ijoin(pieces[::2], rendered))
Beispiel #6
0
    def cook(self, request):
        """Expand wiki links embedded in self.text into html anchors."""
        split_text = pattern.split(self.text)
        if len(split_text) == 1:  # fastpath: nothing to link
            return self.text

        replacements = []
        siblings = self.__parent__
        for raw_link in split_text[1::2]:
            stripped = extract_text_from_html(raw_link)
            # First sibling page whose title loosely matches, else None.
            match = next(
                (p for p in siblings.values()
                 if _eq_loose(p.title, stripped)),
                None)
            if match is None:
                # Unknown title: render an add-new-page link.
                quoted = urllib.quote(stripped.encode("UTF-8"))
                replacements.append(ADD_WIKIPAGE_LINK % (raw_link, quoted))
            else:
                replacements.append(
                    WIKI_LINK % (resource_url(match, request), raw_link))

        # Weave literal chunks and replacements back together in order.
        return u"".join(_ijoin(split_text[::2], replacements))