Example #1
        def wrapper(*args, **kwargs):
            
            page = callback(*args, **kwargs)

            # apply any configured content-type overrides, logging the
            # type before and after so rewrites are visible in the logs
            log.warn(page['content-type'])
            if page['content-type'] in overrides:
                page['content-type'] = overrides[page['content-type']]
            log.warn(page['content-type'])

            # render the raw markup and let plugins transform the resulting tree
            page['data'] = BeautifulSoup(render_markup(page['data'], page['content-type']))
            plugins.apply_all(kwargs['page'], page['data'], request=request, response=response, indexing=False)

            # TODO: normalize links, run specific plugins, etc.

            # Normalize legacy keywords
            keywords = []
            for k in [u'keywords', u'tags']:
                if k in page['headers']:
                    keywords.extend(page['headers'][k].split(','))
            page['headers']['keywords'] = ','.join(set(k.strip() for k in keywords))

            # Inject any per-page CSS
            if u'css' not in page['headers']:
                page['headers']['css'] = None

            # clean up requested URI
            page['headers']['url'] = urlparse.urlunparse((
                request.urlparts.scheme,
                request.urlparts.netloc,
                os.path.abspath(request.urlparts.path),
                None, None, None))
            page['headers']['permalink'] = page['headers']['url'] + "#%s" % munge_string(page['headers']['title'])
            return page
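The wrapper above is the inner function of a decorator. A minimal sketch of how such a decorator could be wired around a route callback follows; the render/route/show names are invented for illustration, and overrides is assumed to be supplied by the caller, as the snippet suggests:

    import functools

    def render(overrides=None):
        # Decorator factory: post-process the page dict a route returns.
        overrides = overrides or {}
        def decorator(callback):
            @functools.wraps(callback)
            def wrapper(*args, **kwargs):
                page = callback(*args, **kwargs)
                if page['content-type'] in overrides:
                    page['content-type'] = overrides[page['content-type']]
                # ... markup rendering and plugin hooks as in Example #1 ...
                return page
            return wrapper
        return decorator

    # Hypothetical usage:
    # @route('/space/<page>')
    # @render(overrides={'text/plain': 'text/x-textile'})
    # def show(page):
    #     return store.get_page(page)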
Example #2
        def wrapper(*args, **kwargs):
            
            page = callback(*args, **kwargs)
            page['data'] = BeautifulSoup(render_markup(page['data'], page['content-type']))
            plugins.apply_all(kwargs['page'], page['data'], request=request, response=response, indexing=False)

            # TODO: normalize links, run plugins, etc.
            
            return page
Example #3
File: acronyms.py  Project: oier/yaki-tng
    def load(self):
        # Load acronym map
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except:
            log.warn("no %s definitions" % meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full text, let's build the acronym map

        for line in all_sections.split('\n'):
            try:
                (acronym, expansion) = line.split(' ',1)
                self.acronyms[acronym.lower()] = expansion
            except ValueError:  # skip blank lines and lines without an expansion
                log.debug("skipping line '%s'" % line)
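Since the map format never appears in the snippet itself, here is a standalone sketch of the same parsing with invented sample data; the real loader pulls this text out of the <pre> tags of the meta page:

    sample = u"HTTP HyperText Transfer Protocol\n\nRSS Really Simple Syndication"

    acronyms = {}
    for line in sample.split('\n'):
        try:
            (acronym, expansion) = line.split(' ', 1)
            acronyms[acronym.lower()] = expansion
        except ValueError:  # blank line: nothing to split
            continue

    print acronyms
    # roughly: {u'http': u'HyperText Transfer Protocol',
    #           u'rss': u'Really Simple Syndication'}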
Example #4
    def load(self):
        # load InterWikiMap
        s = Store()
        try:
            page = s.get_page(self.wiki_map)
        except:
            log.warn("InterWikiMap: no %s definitions" % self.wiki_map)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, let's build the schema hash
        for line in all_sections.split('\n'):
            try:
                (schema, url) = line.strip().split(' ', 1)
                self.schemas[schema.lower()] = h.unescape(url)  # decode HTML entities such as &amp;
            except ValueError:  # skip blank lines and lines without a URL
                log.warn("skipping line '%s'" % line)
        self.mtime = time.time()
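A sketch of how a schema map like this is typically consumed once loaded; the entries and the expand() helper here are invented for illustration and are not yaki's actual link-rewriting code:

    schemas = {
        'wikipedia': 'http://en.wikipedia.org/wiki/%s',
        'rfc': 'http://tools.ietf.org/html/rfc%s',
    }

    def expand(link):
        # Turn 'wikipedia:Python' into the mapped URL if the schema is known.
        (schema, _, page) = link.partition(':')
        pattern = schemas.get(schema.lower())
        return pattern % page if pattern else link

    print expand('wikipedia:Python')  # http://en.wikipedia.org/wiki/Python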
Example #5
    def load(self):
        # Load acronym map
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except:
            log.warn("no %s definitions" % meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))

        all_sections = u''.join(
            map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full text, let's build the acronym map

        for line in all_sections.split('\n'):
            try:
                (acronym, expansion) = line.split(' ', 1)
                self.acronyms[acronym.lower()] = expansion
            except ValueError:  # skip blank lines and lines without an expansion
                log.debug("skipping line '%s'" % line)
Example #6
    def load(self):
        # load Aliases
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except:
            log.warn("Aliases: no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full text, let's build the alias map
        for line in all_sections.split('\n'):
            try:
                (link, replacement) = line.strip().split(' ', 1)
                self.aliases[link] = replacement
                # register the spaced variant too, so both 'Foo_Bar' and 'Foo Bar' resolve
                self.aliases[link.replace('_', ' ')] = replacement
            except ValueError:  # skip blank lines and lines without a replacement
                log.warn("skipping line '%s'" % line)
        self.mtime = time.time()
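A tiny illustration of the double insertion with an invented map entry, showing why the loader registers both the underscore and the spaced form of each alias:

    aliases = {}
    (link, replacement) = "Front_Page HomePage".split(' ', 1)
    aliases[link] = replacement                    # 'Front_Page' -> 'HomePage'
    aliases[link.replace('_', ' ')] = replacement  # 'Front Page' -> 'HomePage'

    print aliases.get('Front Page')  # HomePage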