def parse(body):
    """Parse body and return html
    
    Create a page with body, then parse it and format using html formatter
    """
    assert body is not None

    request.reset()

    pg = Page(request, 'ThisPageDoesNotExistsAndWillNeverBeReally')
    pg.set_raw_body(body)

    from MoinMoin.formatter.text_html import Formatter
    pg.formatter = Formatter(request)
    request.formatter = pg.formatter
    pg.formatter.setPage(pg)
    pg.hilite_re = None

    output = []
    
    # Temporarily replace request.write with a custom write function that
    # writes into our output list.
    def write_output(text, o=output):
        o.append(text)
    saved_write = request.write
    request.write = write_output
    try:
        Parser(body, request).format(pg.formatter)
    finally:
        request.write = saved_write

    return ''.join(output)
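
A minimal usage sketch for the helper above, assuming the module-level request object it relies on has already been set up; the wiki text passed in is purely illustrative.

# Hypothetical usage; `request` must already be a live MoinMoin request object.
html = parse(u"= Heading =\n\nSome ''emphasized'' wiki text.")
print html
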
Example #2
def macro_RandomQuote(macro, pagename=u'FortuneCookies'):
    _ = macro.request.getText

    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]

    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {'pagename': pagename} +
                macro.formatter.highlight(0))

    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    quote = macro.request.redirectedOutput(page.send_page,
        content_only=1, content_id="RandomQuote")

    return quote
Example #3
def macro_RandomQuote(macro, pagename=u'FortuneCookies'):
    _ = macro.request.getText

    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]

    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {
                    'pagename': pagename
                } + macro.formatter.highlight(0))

    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    quote = macro.request.redirectedOutput(page.send_page,
                                           content_only=1,
                                           content_id="RandomQuote")

    return quote
Example #4
def execute(macro, args):
    _ = macro.request.getText

    pagename = args or 'FortuneCookies'
    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]
    
    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {'pagename': pagename} +
                macro.formatter.highlight(0))
                
    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    out = StringIO.StringIO()
    macro.request.redirect(out)
    page.send_page(macro.request, content_only=1, content_id="RandomQuote_%s" % wikiutil.quoteWikinameFS(page.page_name) )
    quote = out.getvalue()
    macro.request.redirect()
    
    return quote
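
Examples #2 and #5 capture the same output with request.redirectedOutput(), which wraps the manual redirect/restore dance shown above. A hedged equivalent of the capture block, assuming the newer MoinMoin API where send_page no longer takes the request argument:

# Sketch only: redirectedOutput() form of the StringIO capture above
# (newer MoinMoin API, as used in Examples #2 and #5).
quote = macro.request.redirectedOutput(
    page.send_page, content_only=1,
    content_id="RandomQuote_%s" % wikiutil.quoteWikinameFS(page.page_name))
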
Example #5
def _format (src_text, request, formatter):
    # parse the text (in wiki source format) and make HTML,
    # after diverting sys.stdout to a string
    ##str_out = StringIO.StringIO ()      # create str to collect output
    ##request.redirect (str_out)          # divert output to that string
    # parse this line
    ##wiki.Parser (src_text, request).format (formatter)
    ##request.redirect ()                 # restore output
    ##return str_out.getvalue ()          # return what was generated

    page = Page(request, '__progressbar')
    page.set_raw_body(src_text, 1)
    wikitext = request.redirectedOutput(page.send_page,
        content_only=1, content_id="__progressbar")
    return wikitext
Example #6
    def __init__(self, summary, desc, label, hours, mentors, difficulty, types):
        self.summary = summary
        self.label = label
        self.hours = hours
        self.mentors = mentors
        self.difficulty = difficulty
        self.types = types

        page = Page(request, "")
        page.set_raw_body(desc)
        desc = request.redirectedOutput(page.send_page, content_only=1)
        for s, r in [
                ('\n', ' '),
                (' class="line862"', ''),
                (' class="line867"', ''),
                (' class="line874"', ''),
                (' class="line891"', ''),
            ]:
            desc = desc.replace(s, r)
        self.desc = desc
Example #7
    def parse(self, body):
        """Parse body and return html

        Create a page with body, then parse it and format using html formatter
        """
        request = self.request
        assert body is not None
        request.reset()
        page = Page(request, PAGENAME)
        page.hilite_re = None
        page.set_raw_body(body)
        formatter = HtmlFormatter(request)
        formatter.setPage(page)
        page.formatter = formatter
        request.formatter = formatter
        parser = WikiParser(body, request, line_anchors=False)
        formatter.startContent("")  # needed for _include_stack init
        output = request.redirectedOutput(parser.format, formatter)
        formatter.endContent("")
        return output
Example #8
    def __init__(self, summary, desc, label, hours, mentors, difficulty, types):
        self.summary = summary
        self.label = label
        self.hours = hours
        self.mentors = mentors
        self.difficulty = difficulty
        self.types = types

        page = Page(request, "")
        page.set_raw_body(desc)
        desc = request.redirectedOutput(page.send_page, content_only=1)
        for s, r in [
                ('\n', ' '),
                (' class="line862"', ''),
                (' class="line867"', ''),
                (' class="line874"', ''),
                (' class="line891"', ''),
            ]:
            desc = desc.replace(s, r)
        self.desc = desc
Example #9
    def parse(self, body):
        """Parse body and return html

        Create a page with body, then parse it and format using html formatter
        """
        request = self.request
        assert body is not None
        request.reset()
        page = Page(request, PAGENAME)
        page.hilite_re = None
        page.set_raw_body(body)
        formatter = HtmlFormatter(request)
        formatter.setPage(page)
        page.formatter = formatter
        request.formatter = formatter
        parser = WikiParser(body, request, line_anchors=False)
        formatter.startContent('')  # needed for _include_stack init
        output = request.redirectedOutput(parser.format, formatter)
        formatter.endContent('')
        return output
Example #10
    def parse(self, body):
        """Parse body and return html

        Create a page with body, then parse it and format using html formatter
        """
        assert body is not None
        self.request.reset()
        page = Page(self.request, 'ThisPageDoesNotExistsAndWillNeverBeReally')
        page.set_raw_body(body)
        from MoinMoin.formatter.text_html import Formatter
        page.formatter = Formatter(self.request)
        self.request.formatter = page.formatter
        page.formatter.setPage(page)
        page.hilite_re = None
        
        output = StringIO()
        saved_write = self.request.write
        self.request.write = output.write
        try:
            Parser(body, self.request).format(page.formatter)
        finally:
            self.request.write = saved_write
        return output.getvalue()
Example #11
def load_json_text_from_page(request, parser, page_name, parser_name):
    formatterClass = wikiutil.searchAndImportPlugin(
        request.cfg, 'formatter', 'extracting_formatter')
    extracting_formatter = formatterClass(parser_name, request)
    # Request rev (number) is only available for the requested page.
    rev = (request.rev or 0) if request.page.page_name == page_name else 0
    page = Page(request, page_name, formatter=extracting_formatter, rev=rev)

    # this is so we get correctly updated data if we just preview in the editor -
    # the new content is not stored on disk yet, but available as macro.parser.raw:
    if parser:
        format = '#format %s\n' % page.pi['format']
        page.set_raw_body(format + parser.raw, modified=1)

    if not page.isStandardPage(includeDeleted=False):
        return None
    extracting_formatter.setPage(page)
    
    # Discarding the return value
    request.redirectedOutput(
        Page.send_page_content, page, request, page.data, 'wiki')

    return extracting_formatter.get_extracted()
Example #12
class MoinMoinEntryParser:
#{{{MoinMoinEntryParser class object
    '''
    This class contains everything needed to parse a file using MoinMoin
    derived objects
    '''

    def __init__(self, url = '', pybloxsom_config = None):
#{{{Init method for the MoinMoinEntryParser class
        '''
        init method for MoinMoinEntryParser Object
        '''
        self.PREFORMATTER_ID = PREFORMATTER_ID

        # New MoinMoin request
        self.request = MoinMoinScriptContext(url)
        self.logger = PyblosxomTools.get_logger()
        self.url = url
        self.entryfile=''
        self.entrytitle = ''
        self.page_name = ''

        # Initial parser configuration if config is usable
        if pybloxsom_config is not None:
            self.pybloxsom_config = pybloxsom_config
            # Use moinmoin cache
            self.request.cacheable = self.request.use_cache = self.pybloxsom_config.get('moinmoin_use_cache', '0') == '1'
            #self.request.cacheable = self.request.use_cache = False

            # moinmoin encoding 
            self.output_charset = self.pybloxsom_config.get('blog_encoding', 'utf-8')
        # We don't have the config, using default
        else:
            self.request.cacheable = False
            self.output_charset = 'utf-8'
#}}}
        self.logger.debug('Moinmoin parser Object created')

    def create_page(self, entryfile = '', load_from_disk = True):
#{{{Here we create a moinmoin page object
        '''Creates a new MoinMoin Page object.

        If load_from_disk is true, the file is loaded from disk using entryfile as the filename.
        If not, the entryfile string is used as the raw page body.
        '''
        # New MoinMoinPage
        if load_from_disk:
            page_dir, self.page_name = os.path.split(entryfile)
        else:
            self.page_name = str(hashlib.md5(entryfile).hexdigest())
        
        self.page = MoinMoinPage(self.request, page_name=self.page_name, formatter=None)

        # Load New page from file
        if load_from_disk:
            self.page.__body = None
            self.page._text_filename_force = entryfile
            self.logger.debug('Entry file to convert: %s' % entryfile)
            # Extracting page title
            tmp_body = self.page.get_body()
            self.entrytitle = tmp_body.split('\n')[0]
            self.page.set_raw_body(tmp_body[len(self.entrytitle)+1:])
            del tmp_body
        else:
            self.page.set_raw_body(entryfile)
            self.logger.debug('Using data got from pyblosxom')

        self.page.hilite_re = None
        self.page.output_charset = self.output_charset
        self.parser = None
        self.formatter = None
        self.default_parser = 'wiki'
        self.output_format = u'text/html'
        self.request.page = self.page
        self.page.__pi = self.page.parse_processing_instructions()
        self.logger.debug('New moinmoin page created')
#}}}

    def create_parser(self, line_anchors = False):
#{{{Here we create a moinmoin paser object
        '''Creating parser object'''
        
        Parser = MoinMoinWikiutil.searchAndImportPlugin(self.request.cfg, 'parser', self.request.getPragma('format', self.default_parser.lower()))
        self.parser = Parser(self.page.get_body(), self.request, line_anchors = line_anchors)
        self.logger.debug('New moinmoin parser created')
#}}}

    def create_formatter(self):
#{{{Here we create a moinmoin formatter object
        '''Creating formatter object'''

        Formatter = MoinMoinWikiutil.searchAndImportPlugin(self.request.cfg, 'formatter', self.request.getPragma('output-format', self.output_format).lower())

        self.formatter = Formatter(self.request)
        self.formatter.setPage(self.page)
        self.request.formatter = self.page.formatter = self.formatter
        self.logger.debug('New moinmoin formatter created')
#}}}

    def parse_entry(self, content_only = 1):
#{{{parsing the entry file return the result
        '''parse the entryfile and return the result string'''
        return self.request.redirectedOutput(self.page.send_page,content_id=self.page_name, content_only = content_only, do_cache=self.request.cacheable)
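
A hypothetical driver for the class above, using only the methods it defines; the URL, config handling, and wiki text are made up for illustration.

# Hypothetical call sequence for MoinMoinEntryParser; URL and body are illustrative.
ep = MoinMoinEntryParser(url='http://wiki.example.org/')
ep.create_page(entryfile=u"First line becomes the title\nSome ''wiki'' body text.",
               load_from_disk=False)
ep.create_parser()
ep.create_formatter()
html = ep.parse_entry(content_only=1)
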
Example #13
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M), called_by_toc=0):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text,))

    # prepare including page
    result = []
    print_mode = macro.form.has_key('action') and macro.form['action'][0] == "print"
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(request, this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if this_page._macroInclude_pagelist.has_key(inc_name):
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name,))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error, e:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            newbody = []
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if called_by_toc:
            result.append(inc_page.get_raw_body())
            continue

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name
        
        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title(request)
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                import sha
                from MoinMoin import config
                # this heading id might produce duplicate ids,
                # if the same page is included multiple times
                # Encode stuff we feed into the sha module.
                pntt = (inc_name + heading).encode(config.charset)
                hid = "head-" + sha.new(pntt).hexdigest()
                request._page_headings.setdefault(pntt, 0)
                request._page_headings[pntt] += 1
                if request._page_headings[pntt] > 1:
                    hid += '-%d'%(request._page_headings[pntt],)
                result.append(
                    #macro.formatter.heading(1, level, hid,
                    #    icons=edit_icon.replace('<img ', '<img align="right" ')) +
                    macro.formatter.heading(1, level, hid) +
                    inc_page.link_to(request, heading, css_class="include-heading-link") +
                    macro.formatter.heading(0, level)
                )

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            cid = request.makeUniqueID("Include_%s" % wikiutil.quoteWikinameFS(inc_page.page_name))
            inc_page.send_page(request, content_only=1, content_id=cid)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                '<div class="include-link">',
                inc_page.link_to(request, '[%s]' % (inc_name,), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'),), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                '</div>',
            ])
Example #14
def macro_TOC(macro, maxdepth=int):
    """
Prints a table of contents.

 maxdepth:: maximum depth the table of contents is generated for (defaults to unlimited)
    """
    if maxdepth is None:
        maxdepth = 99

    pname = macro.formatter.page.page_name

    macro.request.uid_generator.push()

    macro.request._tocfm_collected_headings = []
    macro.request._tocfm_orig_formatter = macro.formatter

    tocfm = TOCFormatter(macro.request)
    p = Page(macro.request, pname, formatter=tocfm, rev=macro.request.rev)

    # this is so we get a correctly updated TOC if we just preview in the editor -
    # the new content is not stored on disk yet, but available as macro.parser.raw:
    p.set_raw_body(macro.parser.raw, modified=1)

    output = macro.request.redirectedOutput(p.send_page,
                                            content_only=True,
                                            count_hit=False,
                                            omit_footnotes=True)

    _ = macro.request.getText

    result = [
        macro.formatter.div(1, css_class="table-of-contents"),
        macro.formatter.paragraph(1, css_class="table-of-contents-heading"),
        macro.formatter.text(_('Contents')),
        macro.formatter.paragraph(0),
    ]

    # find smallest used level and use that as the outer-most indentation,
    # to fix pages like HelpOnMacros that only use h2 and lower levels.
    lastlvl = 100
    for lvl, id, txt in macro.request._tocfm_collected_headings:
        if txt is None:
            incl_id = id
            continue
        if lvl > maxdepth or id is None:
            continue
        if lvl < lastlvl:
            lastlvl = lvl

    # headings are 1-based, lastlvl needs to be one less so that one is closed
    lastlvl -= 1

    for lvl, id, txt in macro.request._tocfm_collected_headings:
        if txt is None:
            incl_id = id
            continue
        if lvl > maxdepth or id is None:
            continue

        # will be reset by pop_unique_ids below
        macro.request.uid_generator.include_id = incl_id

        need_li = lastlvl >= lvl
        while lastlvl > lvl:
            result.extend([
                macro.formatter.listitem(0),
                macro.formatter.number_list(0),
            ])
            lastlvl -= 1
        while lastlvl < lvl:
            result.extend([
                macro.formatter.number_list(1),
                macro.formatter.listitem(1),
            ])
            lastlvl += 1
        if need_li:
            result.extend([
                macro.formatter.listitem(0),
                macro.formatter.listitem(1),
            ])
        result.extend([
            '\n',
            macro.formatter.anchorlink(1, id),
            macro.formatter.text(txt),
            macro.formatter.anchorlink(0),
        ])

    while lastlvl > 0:
        result.append(macro.formatter.listitem(0))
        result.append(macro.formatter.number_list(0))
        lastlvl -= 1

    macro.request.uid_generator.pop()

    result.append(macro.formatter.div(0))
    return ''.join(result)
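
The open/close bookkeeping around lastlvl above is easier to see in isolation; below is a simplified, self-contained sketch of the same pattern using plain tag strings and a synthetic heading list instead of formatter calls.

# Simplified sketch of the lastlvl open/close pattern above (synthetic data).
headings = [(2, 'A'), (3, 'B'), (3, 'C'), (2, 'D')]  # (level, text)
out = []
lastlvl = min(lvl for lvl, txt in headings) - 1  # one below the smallest level used
start = lastlvl
for lvl, txt in headings:
    need_li = lastlvl >= lvl
    while lastlvl > lvl:
        out.extend(['</li>', '</ol>'])
        lastlvl -= 1
    while lastlvl < lvl:
        out.extend(['<ol>', '<li>'])
        lastlvl += 1
    if need_li:
        out.extend(['</li>', '<li>'])
    out.append(txt)
while lastlvl > start:
    out.extend(['</li>', '</ol>'])
    lastlvl -= 1
print ''.join(out)  # <ol><li>A<ol><li>B</li><li>C</li></ol></li><li>D</li></ol>
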
Example #15
def macro_TOC(macro, maxdepth=int):
    """
Prints a table of contents.

 maxdepth:: maximum depth the table of contents is generated for (defaults to unlimited)
    """
    if maxdepth is None:
        maxdepth = 99

    pname = macro.formatter.page.page_name

    macro.request.uid_generator.push()

    macro.request._tocfm_collected_headings = []
    macro.request._tocfm_orig_formatter = macro.formatter

    tocfm = TOCFormatter(macro.request)
    p = Page(macro.request, pname, formatter=tocfm, rev=macro.request.rev)

    # this is so we get a correctly updated TOC if we just preview in the editor -
    # the new content is not stored on disk yet, but available as macro.parser.raw:
    p.set_raw_body(macro.parser.raw, modified=1)

    output = macro.request.redirectedOutput(p.send_page,
                                            content_only=True,
                                            count_hit=False,
                                            omit_footnotes=True)

    _ = macro.request.getText

    result = [
        macro.formatter.div(1, css_class="table-of-contents"),
        macro.formatter.paragraph(1, css_class="table-of-contents-heading"),
        macro.formatter.text(_('Contents')),
        macro.formatter.paragraph(0),
    ]


    # find smallest used level and use that as the outer-most indentation,
    # to fix pages like HelpOnMacros that only use h2 and lower levels.
    lastlvl = 100
    for lvl, id, txt in macro.request._tocfm_collected_headings:
        if txt is None:
            incl_id = id
            continue
        if lvl > maxdepth or id is None:
            continue
        if lvl < lastlvl:
            lastlvl = lvl

    # headings are 1-based, lastlvl needs to be one less so that one is closed
    lastlvl -= 1

    for lvl, id, txt in macro.request._tocfm_collected_headings:
        if txt is None:
            incl_id = id
            continue
        if lvl > maxdepth or id is None:
            continue

        # will be reset by pop_unique_ids below
        macro.request.uid_generator.include_id = incl_id

        need_li = lastlvl >= lvl
        while lastlvl > lvl:
            result.extend([
                macro.formatter.listitem(0),
                macro.formatter.number_list(0),
            ])
            lastlvl -= 1
        while lastlvl < lvl:
            result.extend([
                macro.formatter.number_list(1),
                macro.formatter.listitem(1),
            ])
            lastlvl += 1
        if need_li:
            result.extend([
                macro.formatter.listitem(0),
                macro.formatter.listitem(1),
            ])
        result.extend([
            '\n',
            macro.formatter.anchorlink(1, id),
            macro.formatter.text(txt),
            macro.formatter.anchorlink(0),
        ])

    while lastlvl > 0:
        result.append(macro.formatter.listitem(0))
        result.append(macro.formatter.number_list(0))
        lastlvl -= 1

    macro.request.uid_generator.pop()

    result.append(macro.formatter.div(0))
    return ''.join(result)
Example #16
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M)):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = text and args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text, ))

    # prepare including page
    result = []
    print_mode = request.action in ("print", "format")
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if inc_name in this_page._macroInclude_pagelist:
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name, ))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        if not inc_page.exists():
            continue
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name

        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title()
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                url = inc_page.url(request)
                result.extend([
                    macro.formatter.heading(1, level, id=heading),
                    macro.formatter.url(1, url, css="include-heading-link"),
                    macro.formatter.text(heading),
                    macro.formatter.url(0),
                    macro.formatter.heading(0, level),
                ])

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            inc_page.send_page(content_only=True,
                               omit_footnotes=True,
                               count_hit=False)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                macro.formatter.div(1, css_class="include-link"),
                inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                macro.formatter.div(0),
            ])
        # XXX page.link_to is wrong now, it escapes the edit_icon html as it escapes normal text

    # return include text
    return ''.join(result)
Example #17
def do_diff(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page(request)
        return

    try:
        date = request.form['date'][0]
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = request.form['rev1'][0]
        try:
            rev1 = int(rev1)
        except StandardError:
            rev1 = 0
    except KeyError:
        rev1 = -1

    try:
        rev2 = request.form['rev2'][0]
        try:
            rev2 = int(rev2)
        except StandardError:
            rev2 = 0
    except KeyError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        try:
            rev1 = request.form['rev'][0]
            try:
                rev1 = int(rev1)
            except StandardError:
                rev1 = -1
        except KeyError:
            rev1 = -1
 
    # spacing flag?
    try:
        ignorews = int(request.form['ignorews'][0])
    except (KeyError, ValueError, TypeError):
        ignorews = 0

    _ = request.getText
    
    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    revisions = currentpage.getRevList()
    if len(revisions) < 2:
        currentpage.send_page(request, msg=_("No older revisions available!"))
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    # Start output
    # This action generate content in the user language
    request.setContentLanguage(request.lang)

    request.http_headers()
    wikiutil.send_title(request, _('Diff for "%s"') % (pagename,), pagename=pagename)
  
    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    oldrev1, oldcount1 = None, 0
    oldrev2, oldcount2 = None, 0
    # get the filename of the version to compare to
    edit_count = 0
    for rev in revisions:
        edit_count += 1
        if rev <= rev1:
            oldrev1, oldcount1 = rev, edit_count
        if rev2 and rev >= rev2:
            oldrev2, oldcount2 = rev, edit_count
        if (oldrev1 and oldrev2) or (oldrev1 and not rev2):
            break
    
    if rev1 == -1:
        oldpage = Page(request, pagename, rev=revisions[1])
        oldcount1 = oldcount1 - 1
    elif rev1 == 0:
        oldpage = currentpage
        # oldcount1 is still on init value 0
    else:
        if oldrev1:
            oldpage = Page(request, pagename, rev=oldrev1)
        else:
            oldpage = Page(request, "$EmptyPage$") # hack
            oldpage.set_raw_body("")    # avoid loading from disk
            oldrev1 = 0 # XXX
              
    if rev2 == 0:
        newpage = currentpage
        # oldcount2 is still on init value 0
    else:
        if oldrev2:
            newpage = Page(request, pagename, rev=oldrev2)
        else:
            newpage = Page(request, "$EmptyPage$") # hack
            newpage.set_raw_body("")    # avoid loading from disk
            oldrev2 = 0 # XXX
    
    edit_count = abs(oldcount1 - oldcount2)

    # this should use the formatter, but there is none?
    request.write('<div id="content">\n') # start content div
    request.write('<p class="diff-header">')
    request.write(_('Differences between revisions %d and %d') % (oldpage.get_real_rev(), newpage.get_real_rev()))
    if edit_count > 1:
        request.write(' ' + _('(spanning %d versions)') % (edit_count,))
    request.write('</p>')
  
    if request.user.show_fancy_diff:
        from MoinMoin.util.diff import diff
        request.write(diff(request, oldpage.get_raw_body(), newpage.get_raw_body()))
        newpage.send_page(request, count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        lines = wikiutil.linediff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = _("No differences found!")
            if edit_count > 1:
                msg = msg + '<p>' + _('The page was saved %(count)d times, though!') % {
                    'count': edit_count}
            request.write(msg)
        else:
            if ignorews:
                request.write(_('(ignoring whitespace)') + '<br>')
            else:
                qstr = 'action=diff&ignorews=1'
                if rev1: qstr = '%s&rev1=%s' % (qstr, rev1)
                if rev2: qstr = '%s&rev2=%s' % (qstr, rev2)
                request.write(Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr) + '<p>')

            request.write('<pre>')
            for line in lines:
                if line[0] == "@":
                    request.write('<hr>')
                request.write(wikiutil.escape(line)+'\n')
            request.write('</pre>')

    request.write('</div>\n') # end content div
    wikiutil.send_footer(request, pagename, showpage=1)
Example #18
def macro_Slider(macro,
                 pagename=u'SliderContent',
                 width=u'900px',
                 height=u'100px',
                 interval=u'3000'):
    """
    @param pagename: the pagename for the list to cycle through.
    """
    f = macro.formatter
    request = macro.request
    _ = request.getText

    if request.user.may.read(pagename):
        page = Page(request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    username = request.user.name or 'Anonymous'
    # this selects lines looking like a list item
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]
    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {
                    'pagename': pagename
                } + macro.formatter.highlight(0))

    name = pagename.lower().replace('/', '_')

    result = []
    result.append(
        f.rawHTML(u'<script type="text/javascript" ' +
                  'src="%s/common/js/jssor.slider.min.js"></script>' %
                  request.cfg.url_prefix_static))
    result.append(
        f.rawHTML(
            u'<script>slider_%s_starter = function (id) { var options = { $AutoPlay: true, $AutoPlayInterval: %s }; var slider_%s = new $JssorSlider$(id, options); };</script>'
            % (name, interval, name)))

    result.append(
        f.rawHTML(
            u'<div id="slider_%s_container" style="position: relative; top: 0px; left: 0px; width: %s; height: %s;">'
            % (name, width, height)))
    result.append(
        f.rawHTML(
            u'<div u="slides" style="cursor: move; position: absolute; left: 0px; top: 0px; width: %s; height: %s; overflow: hidden;">'
            % (width, height)))

    for quote in quotes:
        if quote.startswith('[[') and quote.endswith(']]'):
            quote = quote[2:-2]
        page.set_raw_body(Page(request, quote).get_raw_body(), 1)
        text = request.redirectedOutput(page.send_page,
                                        content_only=1,
                                        content_id="Slider")
        result.append(f.rawHTML('<div style="visibility: hidden">'))
        result.append(f.rawHTML(text))
        result.append(f.div(0))

    result.append(f.div(0))
    result.append(
        f.rawHTML('<script>slider_' + name + '_starter("slider_' + name +
                  '_container");</script>'))
    result.append(f.div(0))

    return ''.join(result)