Example #1
    def xmlrpc_getPageVersion(self, pagename, rev):
        """
        Get raw text from specific revision of pagename

        @param pagename: pagename (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: utf-8 encoded page data
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Return page raw text
        if self.version == 2:
            return self._outstr(page.get_raw_body())
        elif self.version == 1:
            return self._outlob(page.get_raw_body())
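A minimal client-side sketch for calling this handler over XML-RPC, assuming the wiki exposes its XML-RPC v2 endpoint at ?action=xmlrpc2 (the URL and page name below are placeholders, not taken from the source):

import xmlrpclib  # Python 2 standard library

proxy = xmlrpclib.ServerProxy("http://example.org/wiki?action=xmlrpc2")
# fetch the raw utf-8 wiki markup of revision 2 of FrontPage
raw = proxy.getPageVersion("FrontPage", 2)
print raw.splitlines()[0]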
Example #2
 def retrain(self, request):
     self.close_spamdb()
     if os.path.exists(self.spam_db):
         os.unlink(self.spam_db)
     self.open_spamdb(request)
     nham = nspam = 0
     for url in Group(request, "HamPages").members():
         scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
         rev = 0
         for pair in query.split("&"):
             key, val = pair.split("=")
             if key == "rev":
                 rev = int(val)
                 break
         pg = Page(request, path[1:], rev=rev)
         self.sbayes.train_ham(pg.get_raw_body())
         nham += 1
     for url in Group(request, "SpamPages").members():
         scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
         rev = 0
         for pair in query.split("&"):
             key, val = pair.split("=")
             if key == "rev":
                 rev = int(val)
                 break
         pg = Page(request, path[1:], rev=rev)
         self.sbayes.train_spam(pg.get_raw_body())
         nspam += 1
     self.close_spamdb()
     return (nham, nspam)
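The loops above extract the revision number from each URL by splitting the query string by hand. For illustration, a sturdier sketch using the same Python 2 stdlib (urlparse.parse_qs is available from Python 2.6 on):

import urlparse

def rev_from_url(url):
    # parse_qs tolerates missing, repeated and valueless parameters,
    # which a manual split("&") / split("=") loop does not
    query = urlparse.urlparse(url).query
    values = urlparse.parse_qs(query).get("rev", ["0"])
    try:
        return int(values[0])
    except ValueError:
        return 0

print rev_from_url("http://wiki.example.org/HamPage?action=raw&rev=7")  # 7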
Example #3
    def xmlrpc_getPageVersion(self, pagename, rev):
        """ Get raw text from specific revision of pagename
        
        @param pagename: pagename (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: utf-8 encoded page data
        """    
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Return page raw text
        if self.version == 2:
            return self._outstr(page.get_raw_body())
        elif self.version == 1:
            return self._outlob(page.get_raw_body())
Example #4
 def retrain(self, request):
     self.close_spamdb()
     if os.path.exists(self.spam_db):
         os.unlink(self.spam_db)
     self.open_spamdb(request)
     nham = nspam = 0
     for url in Group(request, "HamPages").members():
         scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
         rev = 0
         for pair in query.split("&"):
             key, val = pair.split("=")
             if key == "rev":
                 rev = int(val)
                 break
         pg = Page(request, path[1:], rev=rev)
         self.sbayes.train_ham(pg.get_raw_body())
         nham += 1
     for url in Group(request, "SpamPages").members():
         scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
         rev = 0
         for pair in query.split("&"):
             key, val = pair.split("=")
             if key == "rev":
                 rev = int(val)
                 break
         pg = Page(request, path[1:], rev=rev)
         self.sbayes.train_spam(pg.get_raw_body())
         nspam += 1
     self.close_spamdb()
     return (nham, nspam)
Example #5
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        cnt = 0
        script = [packLine(['MoinMoinPackage', '1']), ]

        for pagename in pagelist:
            pagename = pagename.strip()
            page = Page(request, pagename)
            if page.exists():
                cnt += 1
                script.append(packLine([function, str(cnt), pagename]))
                timestamp = wikiutil.version2timestamp(page.mtime_usecs())
                zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
                zi.compress_type = COMPRESSION_LEVEL
                zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            else:
                #import sys
                #print >>sys.stderr, "Could not find the page %s." % pagename.encode("utf-8")
                pass

        script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #6
def execute(macro, args):
    _ = macro.request.getText

    pagename = args or 'FortuneCookies'
    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]
    
    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {'pagename': pagename} +
                macro.formatter.highlight(0))
                
    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    out = StringIO.StringIO()
    macro.request.redirect(out)
    page.send_page(macro.request, content_only=1, content_id="RandomQuote_%s" % wikiutil.quoteWikinameFS(page.page_name) )
    quote = out.getvalue()
    macro.request.redirect()
    
    return quote
Example #7
    def include(self, name, arguments, options, content, lineno,
                content_offset, block_text, state, state_machine):
        # content contains the included file name

        _ = self.request.getText

        if len(content):
            if content[0] in self.included_documents:
                lines = [_("**Duplicate included files are not permitted**")]
                state_machine.insert_input(lines, 'MoinDirectives')
                return
            self.included_documents.append(content[0])
            page = Page(page_name=content[0], request=self.request)
            if page.exists():
                text = page.get_raw_body()
                lines = text.split('\n')
                # Remove the "#format rst" line
                if lines[0].startswith("#format"):
                    del lines[0]
            else:
                lines = [
                    _("**Could not find the referenced page: %s**") %
                    (content[0], )
                ]
            # Insert the text from the included document and then continue
            # parsing
            state_machine.insert_input(lines, 'MoinDirectives')
        return
Example #8
    def include(self, name, arguments, options, content, lineno,
                content_offset, block_text, state, state_machine):
        # content contains the included file name

        _ = self.request.getText

        # Limit the number of documents that can be included
        if self.num_includes < self.max_includes:
            self.num_includes += 1
        else:
            lines = [_("**Maximum number of allowed includes exceeded**")]
            state_machine.insert_input(lines, 'MoinDirectives')
            return

        if len(content):
            pagename = content[0]
            page = Page(page_name=pagename, request=self.request)
            if not self.request.user.may.read(pagename):
                lines = [_("**You are not allowed to read the page: %s**") % (pagename, )]
            else:
                if page.exists():
                    text = page.get_raw_body()
                    lines = text.split('\n')
                    # Remove the "#format rst" line
                    if lines[0].startswith("#format"):
                        del lines[0]
                else:
                    lines = [_("**Could not find the referenced page: %s**") % (pagename, )]
            # Insert the text from the included document and then continue parsing
            state_machine.insert_input(lines, 'MoinDirectives')
        return
Example #9
 def include(self, name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
     # content contains the included file name
     
     _ = self.request.getText
     
     if len(content):
         if content[0] in self.included_documents:
             lines = [_("**Duplicate included files are not permitted**")]
             state_machine.insert_input(lines, 'MoinDirectives')
             return
         self.included_documents.append(content[0])
         page = Page(page_name=content[0], request=self.request)
         if page.exists():
             text = page.get_raw_body()
             lines = text.split('\n')
             # Remove the "#format rst" line
             if lines[0].startswith("#format"):
                 del lines[0]
         else:
             lines = [_("**Could not find the referenced page: %s**") % (content[0],)]
         # Insert the text from the included document and then continue
         # parsing
         state_machine.insert_input(lines, 'MoinDirectives')
     return
Example #10
    def copypage(self, request, rootdir, pagename):
        """ quick and dirty! """
        pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
        os.makedirs(pagedir)

        # write a "current" file with content "00000001"
        revstr = '%08d' % 1
        cf = os.path.join(pagedir, 'current')
        file(cf, 'w').write(revstr+'\n')

        # create a single revision 00000001
        revdir = os.path.join(pagedir, 'revisions')
        os.makedirs(revdir)
        tf = os.path.join(revdir, revstr)
        p = Page(request, pagename)
        text = p.get_raw_body().replace("\n", "\r\n")
        codecs.open(tf, 'wb', config.charset).write(text)

        source_dir = AttachFile.getAttachDir(request, pagename)
        if os.path.exists(source_dir):
            dest_dir = os.path.join(pagedir, "attachments")
            os.makedirs(dest_dir)
            for filename in os.listdir(source_dir):
                source_file = os.path.join(source_dir, filename)
                dest_file = os.path.join(dest_dir, filename)
                shutil.copyfile(source_file, dest_file)
Example #11
    def copypage(self, request, rootdir, pagename):
        """ quick and dirty! """
        pagedir = os.path.join(rootdir, 'pages',
                               wikiutil.quoteWikinameFS(pagename))
        os.makedirs(pagedir)

        # write a "current" file with content "00000001"
        revstr = '%08d' % 1
        cf = os.path.join(pagedir, 'current')
        file(cf, 'w').write(revstr + '\n')

        # create a single revision 00000001
        revdir = os.path.join(pagedir, 'revisions')
        os.makedirs(revdir)
        tf = os.path.join(revdir, revstr)
        p = Page(request, pagename)
        text = p.get_raw_body().replace("\n", "\r\n")
        codecs.open(tf, 'wb', config.charset).write(text)

        source_dir = AttachFile.getAttachDir(request, pagename)
        if os.path.exists(source_dir):
            dest_dir = os.path.join(pagedir, "attachments")
            os.makedirs(dest_dir)
            for filename in os.listdir(source_dir):
                source_file = os.path.join(source_dir, filename)
                dest_file = os.path.join(dest_dir, filename)
                shutil.copyfile(source_file, dest_file)
Example #12
def macro_RandomQuote(macro, pagename=u'FortuneCookies'):
    _ = macro.request.getText

    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]

    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {
                    'pagename': pagename
                } + macro.formatter.highlight(0))

    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    quote = macro.request.redirectedOutput(page.send_page,
                                           content_only=1,
                                           content_id="RandomQuote")

    return quote
Example #13
def macro_RandomQuote(macro, pagename=u'FortuneCookies'):
    _ = macro.request.getText

    if macro.request.user.may.read(pagename):
        page = Page(macro.request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    # this selects lines looking like a list item
    # !!! TODO: make multi-line quotes possible (optionally split by "----" or something)
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]

    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {'pagename': pagename} +
                macro.formatter.highlight(0))

    quote = random.choice(quotes)
    page.set_raw_body(quote, 1)
    quote = macro.request.redirectedOutput(page.send_page,
        content_only=1, content_id="RandomQuote")

    return quote
Example #14
    def collectpackage(self, pagelist, fileobject, pkgname="", include_attachments=False):
        """ Expects a list of pages as an argument, and fileobject to be an open
        file object, which a zipfile will get written to.

        @param pagelist: pages to package
        @param fileobject: open file object to write to
        @param pkgname: optional file name, to prevent self packaging
        @param include_attachments: True if you want attachments collected
        @rtype: string or None
        @return: error message, if one happened
        """
        _ = self.request.getText
        COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED

        pages = []
        for pagename in pagelist:
            pagename = wikiutil.normalize_pagename(pagename, self.request.cfg)
            if pagename:
                page = Page(self.request, pagename)
                if page.exists() and self.request.user.may.read(pagename):
                    pages.append(page)
        if not pages:
            return (_('No pages like "%s"!') % wikiutil.escape(pagelist))

        # Set zipfile output
        zf = zipfile.ZipFile(fileobject, "w", COMPRESSION_LEVEL)

        cnt = 0
        userid = user.getUserIdentification(self.request)
        script = [packLine(['MoinMoinPackage', '1']), ]

        for page in pages:
            cnt += 1
            files = _get_files(self.request, page.page_name)
            script.append(packLine(["AddRevision", str(cnt), page.page_name, userid, "Created by the PackagePages action."]))

            timestamp = wikiutil.version2timestamp(page.mtime_usecs())

            # avoid getting strange exceptions from zipfile in case of pre-1980 timestamps
            nineteeneighty = (10 * 365 + 3) * 24 * 3600 # 1970 + 10y + 3d
            timestamp = max(nineteeneighty, timestamp) # zip can not store timestamps before 1980

            zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            if include_attachments:
                for attname in files:
                    if attname != pkgname:
                        cnt += 1
                        zipname = "%d_attachment" % cnt
                        script.append(packLine(["AddAttachment", zipname, attname, page.page_name, userid, "Created by the PackagePages action."]))
                        filename = AttachFile.getFilename(self.request, page.page_name, attname)
                        zf.write(filename, zipname)
        script += [packLine(['Print', 'Thank you for using PackagePages!'])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #15
def load_template(request, name):
    if not request.user.may.read(name):
        raise InviteException("You are not allowed to read template page '%s'." % name)

    page = Page(request, name)
    if not page.exists():
        raise InviteException("Template page '%s' does not exist." % name)

    return page.get_raw_body()
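A hypothetical caller, sketched only to show the intended flow; the page name and the substitution keys are assumptions, not names from the source:

def render_invitation(request, inviter_name, invitee_email):
    # "InviteTemplate" is a placeholder page name
    template = load_template(request, u"InviteTemplate")
    # assumes the template page contains %(inviter)s and %(invitee)s markers
    return template % {"inviter": inviter_name, "invitee": invitee_email}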
Example #16
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        # page LanguageSetup needs no packing!
        existing_pages = [
            pagename for pagename in pagelist
            if Page(request, pagename).exists() and pagename != 'LanguageSetup'
        ]
        if not existing_pages:
            return

        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        script = [
            packLine(['MoinMoinPackage', '1']),
        ]

        cnt = 0
        for pagename in existing_pages:
            pagename = pagename.strip()
            page = Page(request, pagename)
            files = _get_files(request, pagename)
            for attname in files:
                cnt += 1
                zipname = "%d" % cnt
                script.append(
                    packLine([
                        "ReplaceUnderlayAttachment", zipname, attname, pagename
                    ]))
                attpath = AttachFile.getFilename(request, pagename, attname)
                zf.write(attpath, zipname)

            cnt += 1
            zipname = "%d" % cnt
            script.append(packLine([function, zipname, pagename]))
            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
            zi = zipfile.ZipInfo(
                filename=zipname,
                date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))

        script += [
            packLine([
                'Print',
                'Installed MoinMaster page bundle %s.' %
                os.path.basename(filename)
            ])
        ]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #17
def load_template(request, name):
    if not request.user.may.read(name):
        raise InviteException(
            "You are not allowed to read template page '%s'." % name)

    page = Page(request, name)
    if not page.exists():
        raise InviteException("Template page '%s' does not exist." % name)

    return page.get_raw_body()
Example #18
    def _index_page_rev(self,
                        request,
                        connection,
                        pagename,
                        revno,
                        mode='update'):
        """ Index a page revision.

        @param request: request suitable for indexing
        @param connection: the Indexer connection object
        @param pagename: the page name
        @param revno: page revision number (int)
        @param mode: 'add' = just add, no checks
                     'update' = check if already in index and update if needed (mtime)
        """
        page = Page(request, pagename, rev=revno)
        request.page = page  # XXX for what is this needed?

        wikiname = request.cfg.interwikiname or u"Self"
        revision = str(page.get_real_rev())
        itemid = "%s:%s:%s" % (wikiname, pagename, revision)
        mtime = page.mtime_usecs()

        doc = self._get_document(connection, itemid, mtime, mode)
        logging.debug("%s %s %r" % (pagename, revision, doc))
        if doc:
            mimetype = 'text/%s' % page.pi['format']  # XXX improve this

            fields = {}
            fields['wikiname'] = wikiname
            fields['pagename'] = pagename
            fields['attachment'] = ''  # this is a real page, not an attachment
            fields['mtime'] = str(mtime)
            fields['revision'] = revision
            fields['title'] = pagename
            fields['content'] = page.get_raw_body()
            fields['lang'], fields['stem_lang'] = self._get_languages(page)
            fields['author'] = page.edit_info().get('editor', '?')

            multivalued_fields = {}
            multivalued_fields['mimetype'] = [
                mt for mt in [mimetype] + mimetype.split('/')
            ]
            multivalued_fields['domain'] = self._get_domains(page)
            multivalued_fields['linkto'] = page.getPageLinks(request)
            multivalued_fields['category'] = self._get_categories(page)

            self._add_fields_to_document(request, doc, fields,
                                         multivalued_fields)

            try:
                connection.replace(doc)
            except xappy.IndexerError, err:
                logging.warning("IndexerError at %r %r %r (%s)" %
                                (wikiname, pagename, revision, str(err)))
Example #19
    def _get_page_body(self, page, this_page):
        """Return the content of a named page; accepts relative pages"""

        if page.startswith("/") or len(page) == 0:
            page = this_page + page

        p = Page(self.request, page)
        if not p.exists():
            raise RuntimeError("Page '%s' not found" % page)
        else:
            return p.get_raw_body().split('\n')
Example #20
    def save(self, editor, newtext, rev, **kw):
        BLACKLISTPAGES = ["BadContent", "LocalBadContent"]
        if editor.page_name not in BLACKLISTPAGES:
            request = editor.request

            # Start timing of antispam operation
            request.clock.start('antispam')

            blacklist = []
            latest_mtime = 0
            for pn in BLACKLISTPAGES:
                do_update = (pn != "LocalBadContent" and
                             request.cfg.interwikiname != 'MoinMaster') # MoinMaster wiki shall not fetch updates from itself
                blacklist_mtime, blacklist_entries = getblacklist(request, pn, do_update)
                blacklist += blacklist_entries
                latest_mtime = max(latest_mtime, blacklist_mtime)

            if blacklist:
                invalid_cache = not getattr(request.cfg.cache, "antispam_blacklist", None)
                if invalid_cache or request.cfg.cache.antispam_blacklist[0] < latest_mtime:
                    mmblcache = []
                    for blacklist_re in blacklist:
                        try:
                            mmblcache.append(re.compile(blacklist_re, re.I))
                        except re.error, err:
                            logging.error("Error in regex '%s': %s. Please check the pages %s." % (
                                          blacklist_re,
                                          str(err),
                                          ', '.join(BLACKLISTPAGES)))
                    request.cfg.cache.antispam_blacklist = (latest_mtime, mmblcache)

                from MoinMoin.Page import Page

                oldtext = ""
                if rev > 0: # rev is the revision of the old page
                    page = Page(request, editor.page_name, rev=rev)
                    oldtext = page.get_raw_body()

                newset = frozenset(newtext.splitlines(1))
                oldset = frozenset(oldtext.splitlines(1))
                difference = newset - oldset
                addedtext = kw.get('comment', u'') + u''.join(difference)

                for blacklist_re in request.cfg.cache.antispam_blacklist[1]:
                    match = blacklist_re.search(addedtext)
                    if match:
                        # Log error and raise SaveError, PageEditor should handle this.
                        _ = editor.request.getText
                        msg = _('Sorry, can not save page because "%(content)s" is not allowed in this wiki.') % {
                                  'content': wikiutil.escape(match.group())
                              }
                        logging.info(msg)
                        raise editor.SaveError(msg)
            request.clock.stop('antispam')
Example #21
 def _retrieve_items(self, dict_name):
     # XXX in Moin 2.0 regex should not be used; instead, use the DOM
     # tree to extract dict values. Also it should be possible to
     # convert dict values to different markups (wiki-markup,
     # creole...).
     #
     # Note that formatter which extracts dictionary from a
     # page was developed. See
     # http://hg.moinmo.in/moin/1.9-groups-dmilajevs/file/982f706482e7/MoinMoin/formatter/dicts.py
     page = Page(self.request, dict_name)
     text = page.get_raw_body()
     return dict([match.groups() for match in self._dict_page_parse_regex.finditer(text)])
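The pattern in _dict_page_parse_regex is not shown here. MoinMoin dict pages conventionally hold " key:: value" definition lines, so a plausible stand-in for illustration (an assumption, not the original regex) is:

import re

_dict_page_parse_regex = re.compile(
    ur'^ (?P<key>.+?):: (?P<val>.*?) *$',
    re.MULTILINE | re.UNICODE)

text = u" Fruit:: apple\n Color:: red\n"
print dict(m.groups() for m in _dict_page_parse_regex.finditer(text))
# -> {u'Color': u'red', u'Fruit': u'apple'} (dict order may vary)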
Example #22
    def save(self, editor, newtext, rev, **kw):
        BLACKLISTPAGES = ["BadContent", "LocalBadContent"]
        if editor.page_name not in BLACKLISTPAGES:
            request = editor.request

            # Start timing of antispam operation
            request.clock.start('antispam')
            
            blacklist = []
            invalidate_cache = not getattr(request.cfg, "_mmblcache", None)
            for pn in BLACKLISTPAGES:
                do_update = (pn != "LocalBadContent")
                invalidate_cache_necessary, blacklist_entries = getblacklist(request, pn, do_update)
                blacklist += blacklist_entries
                invalidate_cache |= invalidate_cache_necessary

            if blacklist:
                if invalidate_cache:
                    mmblcache = []
                    for blacklist_re in blacklist:
                        try:
                            mmblcache.append(re.compile(blacklist_re, re.I))
                        except re.error, err:
                            dprint("Error in regex '%s': %s. Please check the pages %s." % (blacklist_re, str(err), ', '.join(BLACKLISTPAGES)))
                            continue
                    request.cfg._mmblcache = mmblcache

                from MoinMoin.Page import Page

                oldtext = ""
                if rev > 0: # rev is the revision of the old page
                    page = Page(request, editor.page_name, rev=rev)
                    oldtext = page.get_raw_body()

                newset = sets.ImmutableSet(newtext.splitlines(1))
                oldset = sets.ImmutableSet(oldtext.splitlines(1))
                difference = newset.difference(oldset)
                addedtext = ''.join(difference) 
                
                for blacklist_re in request.cfg._mmblcache:
                    match = blacklist_re.search(addedtext)
                    if match:
                        # Log error and raise SaveError, PageEditor
                        # should handle this.
                        _ = editor.request.getText
                        msg = _('Sorry, can not save page because "%(content)s"'
                                ' is not allowed in this wiki.') % {
                            'content': match.group()
                            }
                        dprint(msg)
                        raise editor.SaveError(msg)
            request.clock.stop('antispam')
Example #23
	def prepend_to_wiki_page(self, page_uri, heading, content):
		old_content = ''
		wiki_page = Page(self.request, page_uri)
		# if the page exists, keep its old content below the new entry
		if wiki_page.exists():
			old_content = wiki_page.get_raw_body()
		pagecontent = """\
== %(heading)s ==
%(content)s

%(old_content)s
""" % {'heading': heading, 'content': content, 'old_content': old_content}
		return PageEditor(self.request, page_uri).saveText(pagecontent, 0)
Example #24
    def _load_template(self, variable, default):
        if not variable and default:
            name = default
        else:
            name = getattr(self.request.cfg, variable, default)

        if not self.request.user.may.read(name):
            raise InviteException("You are not allowed to read template page '%s'." % name)

        page = Page(self.request, name)
        if not page.exists():
            raise InviteException("Template page '%s' does not exist." % name)

        return page.get_raw_body()
Example #25
 def force_revert(self, pagename, request):
     rev = int(request.form['rev'][0])
     revstr = '%08d' % rev
     oldpg = Page(request, pagename, rev=rev)
     pg = PageEditor(request, pagename)
     _ = request.getText
     msg = _("Thank you for your changes. Your attention to detail is appreciated.")
     try:
         pg._write_file(oldpg.get_raw_body(),
                        action="SAVE/REVERT",
                        extra=revstr)
         pg.clean_acl_cache()
     except pg.SaveError, msg:
         pass
Example #26
 def rollback_local_change(): # YYY direct local access
     comment = u"Wikisync rollback"
     rev = new_local_rev - 1
     revstr = '%08d' % rev
     oldpg = Page(self.request, sp.local_name, rev=rev)
     pg = PageEditor(self.request, sp.local_name)
     if not oldpg.exists():
         pg.deletePage(comment)
     else:
         try:
             savemsg = pg.saveText(oldpg.get_raw_body(), 0, comment=comment, extra=revstr, action="SAVE/REVERT")
         except PageEditor.Unchanged:
             pass
     return sp.local_name
Example #27
 def rollback_local_change(): # YYY direct local access
     comment = u"Wikisync rollback"
     rev = new_local_rev - 1
     revstr = '%08d' % rev
     oldpg = Page(self.request, sp.local_name, rev=rev)
     pg = PageEditor(self.request, sp.local_name)
     if not oldpg.exists():
         pg.deletePage(comment)
     else:
         try:
             savemsg = pg.saveText(oldpg.get_raw_body(), 0, comment=comment, extra=revstr, action="SAVE/REVERT")
         except PageEditor.Unchanged:
             pass
     return sp.local_name
Example #28
 def _retrieve_items(self, dict_name):
     # XXX in Moin 2.0 regex should not be used; instead, use the DOM
     # tree to extract dict values. Also it should be possible to
     # convert dict values to different markups (wiki-markup,
     # creole...).
     #
     # Note that formatter which extracts dictionary from a
     # page was developed. See
     # http://hg.moinmo.in/moin/1.9-groups-dmilajevs/file/982f706482e7/MoinMoin/formatter/dicts.py
     page = Page(self.request, dict_name)
     text = page.get_raw_body()
     return dict([
         match.groups()
         for match in self._dict_page_parse_regex.finditer(text)
     ])
Example #29
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        # page LanguageSetup needs no packing!
        existing_pages = [
            pagename for pagename in pagelist if Page(request, pagename).exists() and pagename != "LanguageSetup"
        ]
        if not existing_pages:
            return

        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        script = [packLine(["MoinMoinPackage", "1"])]

        fallback_timestamp = int(time.time())

        cnt = 0
        for pagename in existing_pages:
            pagename = pagename.strip()
            page = Page(request, pagename)
            files = _get_files(request, pagename)
            for attname in files:
                cnt += 1
                zipname = "%d" % cnt
                script.append(packLine(["ReplaceUnderlayAttachment", zipname, attname, pagename]))
                attpath = AttachFile.getFilename(request, pagename, attname)
                zf.write(attpath, zipname)

            cnt += 1
            zipname = "%d" % cnt
            script.append(packLine([function, zipname, pagename]))
            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
            if not timestamp:
                # page.mtime_usecs() returns 0 for underlay pages
                timestamp = fallback_timestamp
            dt = datetime.fromtimestamp(timestamp)
            zi = zipfile.ZipInfo(filename=zipname, date_time=dt.timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))

        script += [packLine(["Print", "Installed MoinMaster page bundle %s." % os.path.basename(filename)])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #30
 def force_revert(self, pagename, request):
     rev = int(request.form['rev'][0])
     revstr = '%08d' % rev
     oldpg = Page(request, pagename, rev=rev)
     pg = PageEditor(request, pagename)
     _ = request.getText
     msg = _(
         "Thank you for your changes. Your attention to detail is appreciated."
     )
     try:
         pg._write_file(oldpg.get_raw_body(),
                        action="SAVE/REVERT",
                        extra=revstr)
         pg.clean_acl_cache()
     except pg.SaveError, msg:
         pass
Example #31
    def _index_page_rev(self, request, connection, pagename, revno, mode='update'):
        """ Index a page revision.

        @param request: request suitable for indexing
        @param connection: the Indexer connection object
        @param pagename: the page name
        @param revno: page revision number (int)
        @param mode: 'add' = just add, no checks
                     'update' = check if already in index and update if needed (mtime)
        """
        page = Page(request, pagename, rev=revno)
        request.page = page # XXX for what is this needed?

        wikiname = request.cfg.interwikiname or u"Self"
        revision = str(page.get_real_rev())
        itemid = "%s:%s:%s" % (wikiname, pagename, revision)
        mtime = page.mtime_usecs()

        doc = self._get_document(connection, itemid, mtime, mode)
        logging.debug("%s %s %r" % (pagename, revision, doc))
        if doc:
            mimetype = 'text/%s' % page.pi['format']  # XXX improve this

            fields = {}
            fields['wikiname'] = wikiname
            fields['pagename'] = pagename
            fields['attachment'] = '' # this is a real page, not an attachment
            fields['mtime'] = str(mtime)
            fields['revision'] = revision
            fields['title'] = pagename
            fields['content'] = page.get_raw_body()
            fields['lang'], fields['stem_lang'] = self._get_languages(page)
            fields['author'] = page.edit_info().get('editor', '?')

            multivalued_fields = {}
            multivalued_fields['mimetype'] = [mt for mt in [mimetype] + mimetype.split('/')]
            multivalued_fields['domain'] = self._get_domains(page)
            multivalued_fields['linkto'] = page.getPageLinks(request)
            multivalued_fields['category'] = self._get_categories(page)

            self._add_fields_to_document(request, doc, fields, multivalued_fields)

            try:
                connection.replace(doc)
            except xappy.IndexerError, err:
                logging.warning("IndexerError at %r %r %r (%s)" % (
                    wikiname, pagename, revision, str(err)))
Example #32
def execute(macro, args):
    _ = macro.request.getText
    pagename = args

    if not wikiutil.isFormPage(macro.request, pagename):
        return (macro.formatter.sysmsg(1) +
                macro.formatter.text('Not a form page: %s' % args) +
                macro.formatter.sysmsg(0))

    formpage = Page(macro.request, pagename)
    body = formpage.get_raw_body()

    pi_formtext = []
    pi_formfields = []

    while body and body[0] == '#':
        # extract first line
        try:
            line, body = body.split('\n', 1)
        except ValueError:
            line = body
            body = ''

        # skip comments (lines starting with two hash marks)
        if line.startswith('##'): continue

        # parse the PI
        verb, args = (line[1:]+' ').split(' ', 1)
        verb = verb.lower()
        args = args.strip()

        if verb != 'form': continue

        # collect form definitions
        if not pi_formtext:
            pi_formtext.append('<table border="1" cellspacing="1" cellpadding="3">\n'
                '<form method="POST" action="%s">\n'
                '<input type="hidden" name="action" value="formtest">\n' % 'action')
        pi_formtext.append(wikiform.parseDefinition(macro.request, args, pi_formfields))

    # user-defined form preview?
    if pi_formtext:
        pi_formtext.append('<input type="hidden" name="fieldlist" value="%s">\n' %
            "|".join(pi_formfields))
        pi_formtext.append('</form></table>\n')

    return macro.formatter.rawHTML(''.join(pi_formtext))
Example #33
def get_revisions(request, page, checkAccess=True):
    pagename = page.page_name
    if checkAccess and not request.user.may.read(pagename):
        return [], []

    parse_text = importPlugin(request.cfg, 'action', 'savegraphdata',
                              'parse_text')

    alldata = dict()
    revisions = dict()

    for rev in page.getRevList():
        revlink = '%s-gwikirevision-%d' % (pagename, rev)

        # Data about revisions is now cached to the graphdata
        # at the same time this is used.
        if request.graphdata.has_key(revlink):
            revisions[rev] = revlink
            continue

        # If not cached, parse the text for the page
        revpage = Page(request, pagename, rev=rev)
        text = revpage.get_raw_body()
        alldata = parse_text(request, revpage, text)
        if alldata.has_key(pagename):
            alldata[pagename].setdefault('meta',
                                         dict())[u'gwikirevision'] = \
                                         [unicode(rev)]
            # Do the cache.
            request.graphdata.cacheset(revlink, alldata[pagename])

            # Add revision as meta so that it is shown in the table
            revisions[rev] = revlink

    pagelist = [
        revisions[x]
        for x in sorted(revisions.keys(), key=ordervalue, reverse=True)
    ]

    metakeys = set()
    for page in pagelist:
        for key in request.graphdata.get_metakeys(page):
            metakeys.add(key)
    metakeys = sorted(metakeys, key=ordervalue)

    return pagelist, metakeys
Example #34
class Image2Attach:

    def __init__(self, pagename, request):
        self.pagename = pagename
        self.request = request
        self.page = Page(request, pagename)
        self.image_urls = []
        self.images = {} # image binary files {filename: content}
        self.images_fetched = [] # images successfully fetched
        self.process_success = 0 # count of successfully processed images
        self.process_fail = 0 # count of failed images
        self.text = ''
        self.image_extenstions = ['jpg', 'gif', 'png']

    def process(self):
        parser = Parser()
        for line in WikiParser.eol_re.split(self.page.get_raw_body()):
            self.text += parser.parse(line, self.process_image_url) + '\n'
        self.text = self.text[:-1]
        self.write_file()

    def write_file(self):
        """scommit changes"""
        _ = self.request.getText
        if self.process_success > 0:
            PageEditor(self.request, self.pagename)._write_file(
                self.text,
                comment=_(
                    'Image2Attach(%s) saved images successfully: %d, failed: %d'
                    % (__version__, self.process_success, self.process_fail)
                    ),
                )

    def process_image_url(self, url):
        "download image and replace image url"""
        try:
            #url = self.image_url_re.findall(transclude)[0]
            image = self.fetchImage(url)
            self.process_success += 1
            url = url.replace('%20', '') # fix '%20' -> ' ' bug
            return 'attachment:' + self.addAttachment(url, image)
        except Exception, e:
            self.process_fail += 1
            return url
Example #35
def execute(pagename, request):
    from MoinMoin import wikiutil
    from MoinMoin.Page import Page

    _ = request.getText
    thispage = Page(request, pagename)
   
    siocPage = "SiocSpecTemplate"
    specPath = "/var/www/html/spec/"
    specURL  = "http://sparql.captsolo.net/spec/"

    if pagename != siocPage:
        return thispage.send_page(request,
            msg = _('This action only works for SIOC template.'))

    # 1) get template (HTML) 
    #    = page contents

    template = thispage.get_raw_body()
    myMsg = '<p><b>Regenerated SIOC specification from the template.</b></p>'

    # 2) run SpecGen code
    
    import sys
    sys.path.insert(0, specPath)
    import specgen4

    specgen4.setTermDir( specPath+'doc' )
    spec = specgen4.main( 'http://sw.deri.org/svn/sw/2005/08/sioc/ns/sioc', template )

    # 3) save file
    #

    file = open( specPath+'sioc.html', 'wt' )
    file.write(spec.encode('utf-8'))
    file.close()

    # 5) display message - OK

    myMsg += '<p>Check it out: <b><a href="' + specURL + '">SIOC specification</a></b> [draft]</p><p>&nbsp;</p>'

    return thispage.send_page(request,
        msg = _(myMsg))
Example #36
def do_revert(pagename, request):
    from MoinMoin.PageEditor import PageEditor
    _ = request.getText

    if not request.user.may.revert(pagename):
        return Page(request, pagename).send_page(request,
            msg = _('You are not allowed to revert this page!'))

    rev = int(request.form['rev'][0])
    revstr = '%08d' % rev
    oldpg = Page(request, pagename, rev=rev)
    pg = PageEditor(request, pagename)

    try:
        savemsg = pg.saveText(oldpg.get_raw_body(), 0, extra=revstr,
                              action="SAVE/REVERT")
    except pg.SaveError, msg:
        # msg contains a unicode string
        savemsg = unicode(msg)
Example #37
def execute(pagename, request):
    from MoinMoin import wikiutil
    from MoinMoin.Page import Page

    _ = request.getText
    thispage = Page(request, pagename)

    siocPage = "SiocSpecTemplate"
    specPath = "/var/www/html/spec/"
    specURL = "http://sparql.captsolo.net/spec/"

    if pagename != siocPage:
        return thispage.send_page(
            request, msg=_('This action only works for SIOC template.'))

    # 1) get template (HTML)
    #    = page contents

    template = thispage.get_raw_body()
    myMsg = '<p><b>Regenerated SIOC specification from the template.</b></p>'

    # 2) run SpecGen code

    import sys
    sys.path.insert(0, specPath)
    import specgen4

    specgen4.setTermDir(specPath + 'doc')
    spec = specgen4.main('http://sw.deri.org/svn/sw/2005/08/sioc/ns/sioc',
                         template)

    # 3) save file
    #

    file = open(specPath + 'sioc.html', 'wt')
    file.write(spec.encode('utf-8'))
    file.close()

    # 5) display message - OK

    myMsg += '<p>Check it out: <b><a href="' + specURL + '">SIOC specification</a></b> [draft]</p><p>&nbsp;</p>'

    return thispage.send_page(request, msg=_(myMsg))
Example #38
def get_revisions(request, page, checkAccess=True):
    pagename = page.page_name
    if checkAccess and not request.user.may.read(pagename):
        return [], []

    alldata = dict()
    revisions = dict()
    
    for rev in page.getRevList():
        revlink = '%s-gwikirevision-%d' % (pagename, rev)

        # Data about revisions is now cached to the graphdata
        # at the same time this is used.
        if request.graphdata.has_key(revlink):
            revisions[rev] = revlink
            continue

        # If not cached, parse the text for the page
        revpage = Page(request, pagename, rev=rev)
        text = revpage.get_raw_body()
        alldata = parse_text(request, revpage, text)
        if alldata.has_key(pagename):
            alldata[pagename].setdefault('meta',
                                         dict())[u'gwikirevision'] = \
                                         [unicode(rev)]
            # Do the cache.
            request.graphdata.cacheset(revlink, alldata[pagename])

            # Add revision as meta so that it is shown in the table
            revisions[rev] = revlink

    pagelist = [revisions[x] for x in sorted(revisions.keys(),
                                             key=ordervalue,
                                             reverse=True)]

    metakeys = set()
    for page in pagelist:
        for key in request.graphdata.get_metakeys(page):
            metakeys.add(key)
    metakeys = sorted(metakeys, key=ordervalue)

    return pagelist, metakeys
Example #39
def do_raw(pagename, request):
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page(request)
        return

    request.http_headers(["Content-type: text/plain;charset=%s" % config.charset])

    if request.form.has_key('rev'):
        try:
            rev = request.form['rev'][0]
            try:
                rev = int(rev)
            except StandardError:
                rev = 0
        except KeyError:
            rev = 0
        page = Page(request, pagename, rev=rev)
    else:
        page = Page(request, pagename)

    text = page.get_raw_body()
    text = page.encodeTextMimeType(text)
    request.write(text)
    raise MoinMoinNoFooter
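do_raw backs MoinMoin's action=raw URL, so the same text can be fetched from a script with nothing but the stdlib (the wiki URL below is a placeholder):

import urllib2

# action=raw returns the page's plain wiki markup as text/plain
text = urllib2.urlopen("http://example.org/wiki/FrontPage?action=raw").read()
print text[:80]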
Example #40
def execute(pagename, request):
    pagename_header = '%s-%s.zip' % (pagename, datetime.now().isoformat()[:10])
    pagename_header = pagename_header.encode('ascii', 'ignore')

    request.content_type = 'application/zip'
    request.headers['Content-Disposition'] = \
        'attachment; filename="%s"' % pagename_header

    args = values_to_form(request.values)

    try:
        args = args['args'][0]
    except (KeyError, IndexError):
        args = u''

    pagelist, metakeys, _ = metatable_parseargs(request,
                                                args,
                                                get_all_keys=True)

    renameDict = dict()

    for page in pagelist:
        metas = get_metas(request,
                          page, ["gwikirename"],
                          abs_attach=False,
                          checkAccess=False)
        renameList = metas["gwikirename"]
        if renameList:
            renameDict[page] = renameList

    output = StringIO()
    zip = zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED)

    userid = user.getUserIdentification(request)
    script = [
        packLine(['MoinMoinPackage', '1']),
    ]
    counter = 0

    for pagename in pagelist:
        counter += 1
        page = Page(request, pagename)
        timestamp = wikiutil.version2timestamp(page.mtime_usecs())
        # Underlay pages are in epoch 0, zipfile in python 2.7 does
        # not support this.
        if not timestamp:
            pagefile, rev, exists = page.get_rev()
            if rev == 99999999:
                # We should never get here
                log.error("Page %s neither in pages or underlay, skipping." %
                          (pagename))
                continue
            timestamp = os.path.getctime(pagefile)
        pagetext = page.get_raw_body().encode("utf-8")
        filename = str(counter)
        zinfo = zipfile.ZipInfo(
            filename=filename,
            date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        zip.writestr(zinfo, pagetext)

        targetNameList = renameDict.get(pagename, [pagename])
        for targetName in targetNameList:
            script.append(
                packLine(["AddRevision", filename, targetName, userid, ""]))

        for attachment in _get_files(request, pagename):
            counter += 1
            sourcefile = AttachFile.getFilename(request, pagename, attachment)
            filename = str(counter) + "-attachment"
            zip.write(sourcefile, filename)
            script.append(
                packLine([
                    "AddAttachment", filename, attachment, pagename, userid, ""
                ]))

    zip.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    zip.close()

    request.write(output.getvalue())
Example #41
def macro_PackageHeader(macro, arg1):
  package_name = get_unicode(macro.request, arg1)
  package_url = None

  try:
    import yaml
  except ImportError:
    return 'python-yaml is not installed on the wiki. Please have an admin install it on this machine'

  if not package_name:
    return "ERROR in PackageHeader. Usage: [[PackageHeader(pkg_name)]]"    
  
  package_url = package_html_link(package_name)
  url = package_link(package_name) + "/manifest.yaml"
  
  try:
    usock = urllib2.urlopen(url)
    data = usock.read()
    usock.close()
  except:
    return 'Newly proposed, mistyped, or obsolete package. Could not find package "' + package_name + '" in rosdoc: ' + url

  data = yaml.load(unicode(data, 'utf-8'))
  if not data:
    return "Unable to retrieve package data. Auto-generated documentation may need to regenerate"
  # keys
  # - manifest keys
  brief = data.get('brief', package_name)
  authors = data.get('authors', 'unknown')
  try:
    if type(authors) != unicode:
      authors = unicode(authors, 'utf-8')
  except UnicodeDecodeError:
    authors = ''
  license = data.get('license', 'unknown')
  description = data.get('description', '')
  try:
    if type(description) != unicode:
      description = unicode(description, 'utf-8')
  except UnicodeDecodeError:
    description = ''
  depends = data.get('depends', [])
  depends_on = data.get('depends_on', [])
  review_status = data.get('review_status', 'unreviewed')
  review_notes = data.get('review_notes', '') or ''
  external_documentation = data.get('external_documentation', '') or data.get('url', '') or '' 
  if 'ros.org' in external_documentation or 'pr.willowgarage.com' in external_documentation:
     external_documentation = u''
  api_documentation = data.get('api_documentation', '')
  repository = data.get('repository', 'unknown')

  stack = data.get('stack', None)

  p = macro.formatter.paragraph
  url = macro.formatter.url
  div = macro.formatter.div
  em = macro.formatter.emphasis
  br = macro.formatter.linebreak
  strong = macro.formatter.strong
  li = macro.formatter.listitem
  ul = macro.formatter.bullet_list
  h = macro.formatter.heading
  text = macro.formatter.text
  rawHTML = macro.formatter.rawHTML
  comment = macro.formatter.comment

  if stack and stack.lower() not in ['ros', 'sandbox']:
    # set() logic is to get around temporary bug
    siblings = list(set(data.get('siblings', [])))
    # filter out test packages
    siblings = [s for s in siblings if not s.startswith('test_')]
    siblings.sort()
    pagename = macro.formatter.page.page_name

    if stack == pagename:
      top = strong(1)+text(stack)+strong(0)
    else:
      top = strong(1)+wiki_url(macro, stack)+strong(0)+text(': ')

    parts = []
    for s in siblings:
      if s == pagename:
        parts.append(text(s))
      else:
        parts.append(wiki_url(macro, s))
    #nav = em(1) + top + ' | '.join(parts) + em(0)
    nav = em(1) + top
    if parts: 
      nav += parts[0]
      for part in parts[1:]:
        nav += text(' | ')+part
    nav += em(0)
  elif stack and stack.lower() == 'sandbox':
    nav = strong(1)+wiki_url(macro, stack)+strong(0)
  else:
    nav = text('')
  
  # - package data keys
  msgs = data.get('msgs', [])
  srvs = data.get('srvs', [])

  # - package links
  #   -- link to msg/srv autogenerated docs
  msg_doc_title = "Msg/Srv API"
  if msgs and not srvs:
    msg_doc_title = "Msg API"
  elif srvs and not msgs:
    msg_doc_title = "Srv API"
  if msgs or srvs:
    msg_doc = li(1)+strong(1)+msg_doc_link(package_url, msg_doc_title)+strong(0)+li(0)
  else:
    msg_doc = text('')

  troubleshooting = Page(macro.request, '%s/Troubleshooting'%package_name).link_to(macro.request, text='Troubleshooting')
  tutorials = Page(macro.request, '%s/Tutorials'%package_name).link_to(macro.request, text='Tutorials')
  review_link = Page(macro.request, '%s/Reviews'%package_name).link_to(macro.request, text='Reviews')
  review_str = '%(review_link)s (%(review_status)s)' % locals()
  dependency_tree = data.get('dependency_tree', '')
  if external_documentation:
    external_documentation = li(1)+strong(1)+url(1, url=external_documentation)+text("External Documentation")+url(0)+strong(0)+li(0)
  
  try:
    package_desc = h(1, 2, id="first")+text('Package Summary')+h(0, 2)+\
      p(1, css_id="package-info")+rawHTML(description)+p(0)+\
      p(1, id="package-info")+\
      ul(1)+li(1)+text("Author: %s"%authors)+li(0)+\
      li(1)+text("License: %s"%license)+li(0)+\
      li(1)+text("Repository: %s"%repository)+li(0)+ul(0)+p(0)
    if package_name:
      repo_change = True
      page = Page(macro.request, package_name)
      pageeditor = PageEditor(macro.request, package_name)
      savetext = page.get_raw_body()
      lines = savetext.splitlines()
      #lines = [line.strip() for line in lines]
      for line in lines:
        if line.startswith('## repository: %s' % repository):
          repo_change = False

      if repo_change:
        lines = [line for line in lines if not line.startswith('## repository:')]
        savetext = u"## repository: %s\n%s" % (repository, "\n".join(lines))
        pageeditor.saveText(savetext, 0, action='SAVE', notify=False)


  except UnicodeDecodeError:
    package_desc = h(1, 2)+text('Package Summary')+h(0, 2)+\
      p(1)+text('Error retrieving package summary')+p(0)

  try:
    package_links = div(1, id="package-links")+\
      strong(1)+text("Package Links")+strong(0)+\
      ul(1)+\
      li(1)+strong(1)+url(1, url=package_url)+text("Code API")+url(0)+strong(0)+li(0)+msg_doc+\
      external_documentation+\
      li(1)+tutorials+li(0)+\
      li(1)+troubleshooting+li(0)+\
      li(1)+review_str+li(0)+\
      li(1)+url(1, url=dependency_tree)+text('Dependency Tree')+url(0)+li(0)+\
      ul(0)
  except UnicodeDecodeError:
    package_links = div(1, id="package-links")+div(0)

  if depends:
    depends.sort()
    package_links += strong(1)+text("Dependencies")+strong(0)+ul(1)
    for d in depends:
      package_links += li(1)+wiki_url(macro,d,shorten=20)+li(0)
    package_links += ul(0)

  if depends_on:
    depends_on.sort()
    d_links =  u'\n'.join([u"<li>%s</li>"%wiki_url(macro,d,shorten=20) for d in depends_on]) 
    package_links += strong(1)+text("Used by")+strong(0)+ul(1)+rawHTML(d_links)+ul(0)

  package_links+=div(0)

  #html_str = u''.join([s for s in [nav, package_links, package_desc]])
  #return html_str
  return rawHTML(nav) + package_links + package_desc 
Example #42
def fetchcomments(startindex=1, endindex=9999):

    commentlist = []

    request = Globs.macro.request
    formatter = Globs.macro.formatter
    datapagename = Globs.datapagename

    pg = Page(request, datapagename)
    pagetext = pg.get_raw_body()

    regex = ur"""
^[\{]{3}\n
^(?P<icon>[^\n]*)\n
^(?P<name>[^\n]*)\n
^(?P<date>[^\n]*)\n\n
^(?P<text>
    \s*.*?
    (?=[\}]{3})
)[\}]{3}[\n]*
^[#]{2}PASSWORD[ ](?P<passwd>[^\n]*)[\n]*
^[#]{2}LOGINUSER[ ](?P<loginuser>[^\n]*)[\n]*"""

    pattern = re.compile(regex,
                         re.UNICODE + re.MULTILINE + re.VERBOSE + re.DOTALL)
    commentitems = pattern.findall(pagetext)

    cur_index = 0

    for item in commentitems:
        comment = {}
        cur_index += 1

        if cur_index < startindex:
            continue

        comment['index'] = cur_index

        custom_fields = item[0].split(',')

        comment['icon'] = custom_fields[0]

        if len(custom_fields) > 1:
            comment['markup'] = custom_fields[1].strip()
        else:
            comment['markup'] = ''

        comment['name'] = convertdelimeter(item[1], 1)
        comment['date'] = item[2]
        comment['text'] = convertdelimeter(item[3], 1)
        comment['passwd'] = item[4]
        comment['loginuser'] = item[5]

        # experimental
        comment['key'] = comment['date'].strip()

        commentlist.append(comment)

        if cur_index >= endindex:
            break

    return commentlist
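
For reference, the regex above expects each comment to be stored on the data page as a {{{ ... }}} block followed by ##PASSWORD and ##LOGINUSER lines. A minimal round trip against a hand-written entry (the sample text is illustrative):

# -*- coding: utf-8 -*-
import re

sample = (u"{{{\ncomment-icon,moin\nAlice\n2009-03-05 12:00:00\n\n"
          u"Hello world!\n}}}\n##PASSWORD 1a2b3c\n##LOGINUSER Alice\n")

regex = ur"""
^[\{]{3}\n
^(?P<icon>[^\n]*)\n
^(?P<name>[^\n]*)\n
^(?P<date>[^\n]*)\n\n
^(?P<text>
    \s*.*?
    (?=[\}]{3})
)[\}]{3}[\n]*
^[#]{2}PASSWORD[ ](?P<passwd>[^\n]*)[\n]*
^[#]{2}LOGINUSER[ ](?P<loginuser>[^\n]*)[\n]*"""

pattern = re.compile(regex, re.UNICODE + re.MULTILINE + re.VERBOSE + re.DOTALL)
for icon, name, date, text, passwd, loginuser in pattern.findall(sample):
    print name, date, text.strip()   # Alice 2009-03-05 12:00:00 Hello world!
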
Example #43
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date)  # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date:  # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use the "No log entries found." message because we already have
        # i18n for it. Better would be "At least one log entry was not found.".
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ),
                             pagename=pagename,
                             allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"),
        enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {
        'button': other_diff_button_html
    }

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (
        title,
        page_url,
        "left",
        prev_oldrev,
        oldrev,
        _("Previous change"),
        enabled(oldrev > 1),
        revert_html,
        page_url,
        "right",
        newrev,
        next_newrev,
        _("Next change"),
        enabled(newrev < currentrev),
    )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes,
                     enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(
                request,
                on=1,
                querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                },
                css_class="diff-nav-link %s" % css_classes,
                title=enabled_title) + request.formatter.text(
                    caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
            }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
        'rev_header': _('Revision %(rev)d as of %(date)s'),
        'rev_size_caption': _('Size'),
        'rev_author_caption': _('Editor'),
        'rev_ts_caption': _('Date'),
        'rev_comment_caption': _('Comment'),
    }

    rev_info_old_html = rev_info_html % {
        'rev_first_link':
        rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4',
                     'diff-first-link diff-old-rev',
                     _('Diff with oldest revision in left pane'),
                     _("No older revision available for diff")),
        'rev_prev_link':
        rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190',
                     'diff-prev-link diff-old-rev',
                     _('Diff with older revision in left pane'),
                     _("No older revision available for diff")),
        'rev_next_link':
        rev_nav_link(
            (oldrev < currentrev) and (next_oldrev < newrev), next_oldrev,
            newrev, u'\u2192', 'diff-next-link diff-old-rev',
            _('Diff with newer revision in left pane'),
            _("Can't change to revision newer than in right pane")),
        'rev_last_link':
        '',
        'rev':
        oldrev,
        'rev_size':
        oldpage.size(),
        'rev_author':
        oldlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link':
        '',
        'rev_prev_link':
        rev_nav_link(
            (newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev,
            u'\u2190', 'diff-prev-link diff-new-rev',
            _('Diff with older revision in right pane'),
            _("Can't change to revision older than revision in left pane")),
        'rev_next_link':
        rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192',
                     'diff-next-link diff-new-rev',
                     _('Diff with newer revision in right pane'),
                     _("No newer revision available for diff")),
        'rev_last_link':
        rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5',
                     'diff-last-link diff-new-rev',
                     _('Diff with newest revision in right pane'),
                     _("No newer revision available for diff")),
        'rev':
        newrev,
        'rev_size':
        newpage.size(),
        'rev_author':
        newlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(
            f.rawHTML(
                diff_html.diff(request,
                               oldpage.get_raw_body(),
                               newpage.get_raw_body(),
                               old_top=rev_info_old_html,
                               new_top=rev_info_new_html,
                               old_top_class="diff-info",
                               new_top_class="diff-info")))
        newpage.send_page(count_hit=0,
                          content_only=1,
                          content_id="content-below-diff")
    else:
        request.write(
            f.rawHTML(
                '<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>'
                % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(
                    _('The page was saved %(count)d times, though!') %
                    {'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')),
                              f.linebreak())
            else:
                qstr = {
                    'action': 'diff',
                    'ignorews': '1',
                }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(
                    f.paragraph(1),
                    Page(request, pagename).link_to(
                        request,
                        text=_('Ignore changes in the amount of whitespace'),
                        querystr=qstr,
                        rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0))  # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
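
The revision-selection logic at the top of the action is the subtle part: rev1 == -1 means "one before current", 0 means "current", and out-of-order pairs are swapped first. That resolution, pulled out as a standalone sketch (the helper name is illustrative, not MoinMoin API):

def resolve_revs(rev1, rev2, currentrev):
    """Map the diff action's (rev1, rev2) conventions to concrete revisions."""
    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1        # normalize ordering, as above
    if rev1 == -1:
        oldrev = currentrev - 1        # default: diff against the previous revision
    elif rev1 == 0:
        oldrev = currentrev
    else:
        oldrev = rev1
    newrev = currentrev if rev2 == 0 else rev2
    return oldrev, newrev

# resolve_revs(-1, 0, 10) -> (9, 10); resolve_revs(7, 3, 10) -> (3, 7)
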
Example #44
def execute(macro, args):
    args = args.split(',')
    if len(args) != 3:
        return "invalid arguments: <<CodeRef(blockname,start_line,end_line)>>"

    block = args[0]
    start_line = int(args[1])
    end_line = int(args[2])

    if not block:
        return "invalid arguments: no code block specified"
    if start_line > end_line:
        return "invalid arguments: start_line cannot be greater than end_line."

    request = macro.request
    content = []
    page_name = macro.formatter.page.page_name

    page = Page(request, page_name)
    body = page.get_raw_body()

    start_pat = re.compile("{{{\n(#!.*)\n")
    block_pat = re.compile("block=([-a-z0-9_]*)")
    end_pat = re.compile("}}}")
    i = 0
    code_block = None
    specline = None
    while i < len(body):
        m = start_pat.search(body, i)
        if m is None:
            break
        specline = m.group(1)
        m2 = block_pat.search(specline)
        if m2 and m2.group(1) == block:
            m3 = end_pat.search(body, m.end())
            if m3:
                code_block = body[m.end():m3.start()]
            else:
                code_block = "unknown"
            break
        i = m.end()

    if not code_block:
        return "Error: No code_block found"

    lines = code_block.split("\n")
    mylines = lines[start_line - 1:end_line]
    code_block = "\n".join(mylines)

    out = StringIO.StringIO()
    macro.request.redirect(out)
    wikiizer = wiki.Parser(
        "{{{\n" + specline + " start=%d" % start_line + "\n" + code_block +
        "\n}}}\n", macro.request)
    wikiizer.format(macro.formatter)
    result = out.getvalue()
    macro.request.redirect()
    del out

    return result
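
Usage, assuming the macro is installed as CodeRef: a source page defines a named block (the parser requires the #! spec line to carry block=<name>), and the macro re-renders a line range from it. Block name and range here are illustrative:

{{{
#!python block=hello
def hello():
    print "one"
    print "two"
    print "three"
}}}

<<CodeRef(hello,2,4)>>
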
Example #45
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use the "No log entries found." message because we already have
        # i18n for it. Better would be "At least one log entry was not found.".
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {'button': other_diff_button_html}

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (title,
       page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
       revert_html,
       page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(request, on=1, querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                    }, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
                }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
    'rev_header': _('Revision %(rev)d as of %(date)s'),
    'rev_size_caption': _('Size'),
    'rev_author_caption': _('Editor'),
    'rev_ts_caption': _('Date'),
    'rev_comment_caption': _('Comment'),
}

    rev_info_old_html = rev_info_html % {
        'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4', 'diff-first-link diff-old-rev', _('Diff with oldest revision in left pane'), _("No older revision available for diff")),
        'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190', 'diff-prev-link diff-old-rev', _('Diff with older revision in left pane'), _("No older revision available for diff")),
        'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192', 'diff-next-link diff-old-rev', _('Diff with newer revision in left pane'), _("Can't change to revision newer than in right pane")),
        'rev_last_link': '',
        'rev': oldrev,
        'rev_size': oldpage.size(),
        'rev_author': oldlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link': '',
        'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190', 'diff-prev-link diff-new-rev', _('Diff with older revision in right pane'), _("Can't change to revision older than revision in left pane")),
        'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192', 'diff-next-link diff-new-rev', _('Diff with newer revision in right pane'), _("No newer revision available for diff")),
        'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5', 'diff-last-link diff-new-rev', _('Diff with newest revision in right pane'), _("No newer revision available for diff")),
        'rev': newrev,
        'rev_size': newpage.size(),
        'rev_author': newlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info")))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
Example #46
def fetchcomments(startindex=1, endindex=9999):

    commentlist = []

    request = Globs.macro.request
    formatter = Globs.macro.formatter
    datapagename = Globs.datapagename

    pg = Page(request, datapagename)
    pagetext = pg.get_raw_body()

    regex = ur"""
^[\{]{3}\n
^(?P<icon>[^\n]*)\n
^(?P<name>[^\n]*)\n
^(?P<date>[^\n]*)\n
^(?P<rating>[^\n]*)\n\n
^(?P<text>
    \s*.*?
    (?=[\}]{3})
)[\}]{3}[\n]*
^[#]{2}PASSWORD[ ](?P<passwd>[^\n]*)[\n]*
^[#]{2}LOGINUSER[ ](?P<loginuser>[^\n]*)[\n]*"""

    pattern = re.compile(regex,
                         re.UNICODE + re.MULTILINE + re.VERBOSE + re.DOTALL)
    commentitems = pattern.findall(pagetext)

    cur_index = 0

    for item in commentitems:
        comment = {}
        cur_index += 1

        if cur_index < startindex:
            continue

        comment['index'] = cur_index

        custom_fields = item[0].split(',')

        comment['icon'] = custom_fields[0]

        if len(custom_fields) > 1:
            comment['markup'] = custom_fields[1].strip()
        else:
            comment['markup'] = ''

        now = time.time()
        t = float(item[2])
        date = time.strftime(request.cfg.datetime_fmt, time.localtime(t))

        dt = now - t
        datestr = ""

        years = int(dt / (86400 * 365))
        months = int(dt / (86400 * 30))
        days = int(dt / 86400)
        hours = int(dt / 3600)
        minutes = int(dt / 60)

        if years > 1:
            datestr = " (%d years ago)" % years
        elif years == 1:
            datestr = " (%d year ago)" % years
        elif months > 1:
            datestr = " (%d months ago)" % months
        elif days > 1:
            datestr = " (%d days ago)" % days
        elif days == 1:
            datestr = " (%d day ago)" % days
        elif hours > 1:
            datestr = " (%d hours ago)" % hours
        elif hours == 1:
            datestr = " (%d hour ago)" % hours
        elif minutes > 1:
            datestr = " (%d minutes ago)" % minutes
        elif minutes == 1:
            datestr = " (%d minute ago)" % minutes
        else:
            datestr = " (%d seconds ago)" % int(dt)

        comment['name'] = convertdelimeter(item[1], 1)
        comment['date'] = date + datestr
        comment['rating'] = convertdelimeter(item[3], 1)
        comment['text'] = convertdelimeter(item[4], 1)
        comment['passwd'] = item[5]
        comment['loginuser'] = item[6]

        # experimental
        comment['key'] = item[2]

        commentlist.append(comment)

        if cur_index >= endindex:
            break

    return commentlist
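
The "(N units ago)" suffix logic is self-contained; a compact variant of the same idea (helper name illustrative; the one-month/one-day boundaries differ slightly from the chain above):

import time

def time_ago(t, now=None):
    """Return a rough ' (N units ago)' suffix for a unix timestamp."""
    if now is None:
        now = time.time()
    dt = now - t
    for seconds, unit in ((86400 * 365, "year"), (86400 * 30, "month"),
                          (86400, "day"), (3600, "hour"), (60, "minute")):
        n = int(dt / seconds)
        if n > 1:
            return " (%d %ss ago)" % (n, unit)
        if n == 1:
            return " (%d %s ago)" % (n, unit)
    return " (%d seconds ago)" % int(dt)
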
Example #47
    def save(self, editor, newtext, rev, **kw):
        BLACKLISTPAGES = ["BadContent", "LocalBadContent"]
        if editor.page_name not in BLACKLISTPAGES:
            request = editor.request

            # Start timing of antispam operation
            request.clock.start('antispam')

            blacklist = []
            latest_mtime = 0
            for pn in BLACKLISTPAGES:
                do_update = (
                    pn != "LocalBadContent"
                    and request.cfg.interwikiname != 'MoinMaster'
                )  # MoinMaster wiki shall not fetch updates from itself
                blacklist_mtime, blacklist_entries = getblacklist(
                    request, pn, do_update)
                blacklist += blacklist_entries
                latest_mtime = max(latest_mtime, blacklist_mtime)

            if blacklist:
                invalid_cache = not getattr(request.cfg.cache,
                                            "antispam_blacklist", None)
                if invalid_cache or request.cfg.cache.antispam_blacklist[
                        0] < latest_mtime:
                    mmblcache = []
                    for blacklist_re in blacklist:
                        try:
                            mmblcache.append(re.compile(blacklist_re, re.I))
                        except re.error, err:
                            logging.error(
                                "Error in regex '%s': %s. Please check the pages %s."
                                % (blacklist_re, str(err),
                                   ', '.join(BLACKLISTPAGES)))
                    request.cfg.cache.antispam_blacklist = (latest_mtime,
                                                            mmblcache)

                from MoinMoin.Page import Page

                oldtext = ""
                if rev > 0:  # rev is the revision of the old page
                    page = Page(request, editor.page_name, rev=rev)
                    oldtext = page.get_raw_body()

                newset = frozenset(newtext.splitlines(1))
                oldset = frozenset(oldtext.splitlines(1))
                difference = newset - oldset
                addedtext = kw.get('comment', u'') + u''.join(difference)

                for blacklist_re in request.cfg.cache.antispam_blacklist[1]:
                    match = blacklist_re.search(addedtext)
                    if match:
                        # Log error and raise SaveError, PageEditor should handle this.
                        _ = editor.request.getText
                        msg = _(
                            'Sorry, can not save page because "%(content)s" is not allowed in this wiki.'
                        ) % {
                            'content': wikiutil.escape(match.group())
                        }
                        logging.info(msg)
                        raise editor.SaveError(msg)
            request.clock.stop('antispam')
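
Note that the handler only scans text that is new in this save: it diffs the line sets of the old and new body and prepends the edit comment. The same computation in isolation (values illustrative):

oldtext = u"a known line\n"
newtext = u"a known line\nbuy cheap pills\n"
comment = u"spammy edit comment"

# splitlines(1) keeps line endings, so unchanged lines compare equal
difference = frozenset(newtext.splitlines(1)) - frozenset(oldtext.splitlines(1))
addedtext = comment + u''.join(difference)
# only "buy cheap pills\n" (plus the comment) is matched against the blacklist
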
Example #48
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M), called_by_toc=0):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text,))

    # prepare including page
    result = []
    print_mode = macro.form.has_key('action') and macro.form['action'][0] == "print"
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(request, this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if this_page._macroInclude_pagelist.has_key(inc_name):
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name,))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error, e:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            newbody = []
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if called_by_toc:
            result.append(inc_page.get_raw_body())
            continue

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name
        
        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title(request)
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                import sha
                from MoinMoin import config
                # this heading id might produce duplicate ids,
                # if the same page is included multiple times
                # Encode stuff we feed into the sha module.
                pntt = (inc_name + heading).encode(config.charset)
                hid = "head-" + sha.new(pntt).hexdigest()
                request._page_headings.setdefault(pntt, 0)
                request._page_headings[pntt] += 1
                if request._page_headings[pntt] > 1:
                    hid += '-%d' % (request._page_headings[pntt],)
                result.append(
                    #macro.formatter.heading(1, level, hid,
                    #    icons=edit_icon.replace('<img ', '<img align="right" ')) +
                    macro.formatter.heading(1, level, hid) +
                    inc_page.link_to(request, heading, css_class="include-heading-link") +
                    macro.formatter.heading(0, level)
                )

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            cid = request.makeUniqueID("Include_%s" % wikiutil.quoteWikinameFS(inc_page.page_name))
            inc_page.send_page(request, content_only=1, content_id=cid)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                '<div class="include-link">',
                inc_page.link_to(request, '[%s]' % (inc_name,), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'),), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                '</div>',
            ])
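
The _macroInclude_pagelist counter is what keeps two pages that include each other from recursing forever: the marker is set before the included page is rendered and cleared afterwards, and a page already on the stack is refused. A stripped-down sketch of that guard (the render callback is illustrative):

def include_once(this_page, inc_name, render):
    stack = this_page._macroInclude_pagelist
    if inc_name in stack:
        return u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % inc_name
    stack[inc_name] = stack.get(inc_name, 0) + 1    # set or increment marker
    try:
        return render(inc_name)
    finally:                                        # decrement or remove marker
        if stack[inc_name] > 1:
            stack[inc_name] -= 1
        else:
            del stack[inc_name]
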
Example #49
def execute(macro, args):

   request = macro.request

   # get params
   if args:
      args = args.split(',')
   else:
      args = []

   kw = {}
   for arg in args :
      if '=' in arg:
         key, value = arg.split('=', 1)
         kw[str(key.strip())] = value.strip()

   try:
      maxTags = int( kw["maxTags"] )
   except (KeyError, ValueError):
      maxTags = 50

   try:
      autoCreate = kw["autoCreate"] in ("true", "True")
   except KeyError:
      autoCreate = False

   #{level:hits , level:hits , ...}
   level = { 0 : 4 , 1 : 7 , 2 : 12 , 3 : 18 , 4 : 25 , 5 : 35 , 6 : 50 , 7 : 60 , 8 : 90 }

   needle = r'regex:((\r)?\n----(\r)?\n[a-zA-Z])'

   # Search the pages
   query = search.QueryParser().parse_query(needle)
   results = search.searchPages(request, query)
   pages = [hit.page_name for hit in results.hits]

   tags = []

   for page in pages:
      page = Page(request, page)
      if page.isStandardPage() and not page.isUnderlayPage():
         body = page.get_raw_body()
         match = re.search(r'----(\r)?\n(?P<tags>.*)(\r)?\n', body)
         if match:
            for tag in match.group('tags').split(','):
               tags.insert(0, str(tag).strip())

   taglist = list(frozenset(tags))

   def sort(t):
      return t[1]

   show = []
   for tag in taglist:
      show.append( (tag, tags.count(tag)) )
   show.sort(key=sort, reverse=True)
   show = show[0:maxTags]
   show.sort()

   html = []

   for tag in show:

      pagename = tag[0]
      hits = tag[1]

      # auto create tag page if not exist
      if autoCreate:
         page = Page(request, pagename)
         if not page.isStandardPage(includeDeleted=False) and not page.isUnderlayPage():

            from MoinMoin.security import Permissions
            class SecurityPolicy(Permissions):
               def write(*args, **kw):
                  return True
               def save(*args, **kw):
                  return True
            request.user.may = SecurityPolicy(request.user)

            PageEditor(request, pagename).saveText(ur"[[FullSearch(regex:(-{4}(\r)?\n(.*)%s))]]"%(tag[0]), 0)

      # map the hit count to one of ten font sizes via the level thresholds
      sizes = (0.65, 0.75, 0.9, 1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3)
      size = sizes[-1]
      for lvl in sorted(level.keys()):
         if hits < level[lvl]:
            size = sizes[lvl]
            break
      html.append(u'<span style="font-size:%sem;"><a href="%s"> %s</a></span>' % (size, pagename, tag[0]))

   return ''.join(html)
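
The count-and-rank step (collect every tag occurrence, count duplicates, keep the top maxTags, then re-sort alphabetically) can be written more directly with collections.Counter (Python 2.7+); a sketch with illustrative data:

from collections import Counter

tags = ['wiki', 'python', 'wiki', 'macro', 'wiki', 'python']
maxTags = 2

show = Counter(tags).most_common(maxTags)   # [('wiki', 3), ('python', 2)]
show.sort()                                 # alphabetical display order, as above
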
Example #50
def convertfile(page, output=None, overwrite=False):
    pagedir = page.getPagePath()
    pagename = wikiname(pagedir)

    if not output:
        output = pagename

    print "Converting %s" % pagename

    if page.isUnderlayPage():
        print "underlay: %s" % page.request.cfg.data_underlay_dir
        print "underlay: %s" % request.cfg.data_underlay_dir
        print "SKIP UNDERLAY: %s" % pagename
        return False

    current_exists = page.exists()
    current_rev = page.current_rev()

    if convert_attic:
        revs = page.getRevList()
    else:
        revs = [current_rev]

    # Generate random ID Number for collision avoidance when attachments in Namespace have the same name
    randomID = random.randint(101, 999)

    for rev in revs:
        page = Page(request, pagename, rev=rev)
        pagefile, realrev, exists = page.get_rev(rev=rev)

        mtime = page.mtime_usecs() / USEC

        if not mtime:
            if os.path.exists(pagefile) != exists:
                raise Exception, "IT SHOULD NOT HAPPEN"

            if os.path.exists(pagefile):
                mtime = int(os.path.getmtime(pagefile))
                print "recovered %s: %s" % (rev, mtime)

            if not mtime:
                print "NO REVISION: for %s" % pagefile
                continue

        if rev == current_rev:
            out_file = os.path.join(output_dir, 'pages', dw.wikiFN(output))
            if not convert_attic and not exists:
                # when not converting the attic, the current revision may no longer exist; skip it
                continue
        else:
            out_file = os.path.join(output_dir, 'attic',
                                    dw.wikiFN(output, str(mtime)))

        content = moin2doku(pagename, page.get_raw_body(), randomID)
        if len(content) == 0:
            #			raise Exception, "No content"
            print "NO CONTENT: exists: %s,%s" % (exists,
                                                 os.path.exists(pagefile))

        writefile(out_file, content, overwrite=overwrite)
        copystat(pagefile, out_file)

    ID = dw.cleanID(output)
    copy_attachments(page, dw.getNS(ID), randomID)

    # convert edit-log, it's always present even if current page is not
    convert_editlog(page, output=output, overwrite=overwrite)

    # add to redirect.conf if filenames differ
    # and page must exist (no redirect for deleted pages)
    if redirect_conf and current_exists:
        # redirect dokuwiki plugin is quite picky
        # - it doesn't understand if entries are not lowercase
        # - it doesn't understand if paths are separated by forward slash
        old_page = pagename.lower().replace('/', ':').replace(' ', '_')
        if old_page != ID:
            redirect_map[old_page] = ID

    print "Converted %s as %s" % (pagename, dw.wikiFN(output))

    return True
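
The redirect plugin's naming rules (all lowercase, ':' as the namespace separator, underscores instead of spaces) boil down to one normalization step; as a tiny helper (name illustrative):

def dokuwiki_redirect_name(pagename):
    """Normalize a MoinMoin page name the way redirect.conf entries expect."""
    return pagename.lower().replace('/', ':').replace(' ', '_')

# dokuwiki_redirect_name('Dev/Release Notes') -> 'dev:release_notes'
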
Example #51
def execute(macro,
            text,
            args_re=re.compile(_args_re_pattern),
            title_re=re.compile(_title_re, re.M),
            called_by_toc=0):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = text and args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) %
                (text, ))

    # prepare including page
    result = []
    print_mode = macro.form.has_key('action') and macro.form['action'][0] in (
        "print", "format")
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(request, this_page.page_name,
                                    args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass  # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if this_page._macroInclude_pagelist.has_key(inc_name):
            result.append(
                u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>'
                % (inc_name, ))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        if not inc_page.exists():
            continue
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'

        #        body = body.replace(this_page.page_name, "_" + this_page.page_name + "_")
        body = body.replace('amcl', "_" + this_page.page_name + "_")

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            cid = request.makeUniqueID(
                "Include_%s" % wikiutil.quoteWikinameURL(inc_page.page_name))
            inc_page.send_page(request,
                               content_only=1,
                               content_id=cid,
                               omit_footnotes=True)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

    # return include text
    return ''.join(result)
Example #52
def execute(macro, text):
    request = macro.request
    formatter = macro.formatter
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(time.time())
    thispage = formatter.page.page_name
    # does the url have calendar params (= somebody has clicked on prev/next links in calendar) ?
    if 'calparms' in macro.request.args:
        has_calparms = 1 # yes!
        text2 = macro.request.args['calparms']
        cparmpagename, cparmyear, cparmmonth, cparmoffset, cparmoffset2, cparmheight6, cparmanniversary, cparmtemplate = \
            parseargs(request, text2, thispage, currentyear, currentmonth, 0, 0, False, False, u'')
        # Note: cparmheight6 and cparmanniversary are not used, they are just there
        # to have a consistent parameter string in calparms and macro args
    else:
        has_calparms = 0

    if text is None: # macro call without parameters
        text = u''

    # parse and check arguments
    parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, anniversary, parmtemplate = \
        parseargs(request, text, thispage, currentyear, currentmonth, 0, 0, False, False, u'')

    # does url have calendar params and is THIS the right calendar to modify (we can have multiple
    # calendars on the same page)?
    #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):

    # move all calendars when using the navigation:
    if has_calparms and cparmpagename == parmpagename:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset + cparmoffset2)
        parmoffset2 = cparmoffset2
        parmtemplate = cparmtemplate
    else:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)

    if request.isSpiderAgent and abs(currentyear - year) > 1:
        return '' # this is a bot and it didn't follow the rules (see below)
    if currentyear == year:
        attrs = {}
    else:
        attrs = {'rel': 'nofollow' } # otherwise even well-behaved bots will index forever

    # get the calendar
    monthcal = calendar.monthcalendar(year, month)

    # european / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    if calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    qpagenames = '*'.join([wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth, parmoffset)
    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request, querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request, querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', **attrs) + '&lt;' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', **attrs) + '&gt;' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', **attrs) + '&lt;&lt;' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', **attrs) + '&gt;&gt;' + formatter.url(0)

    if parmpagename != [thispage]:
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)
        if steps > maxsteps:
            steps = maxsteps
        chstep = int(l / steps)
        st = 0
        while st < l:
            ch = parmpagename[0][st:st+chstep]
            r, g, b = cliprgb(r, g, b)
            link = Page(request, parmpagename[0]).link_to(request, ch,
                        rel='nofollow',
                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + link
            r, g, b = (r, g+colorstep, b)
            st = st + chstep
        r, g, b = (255-colorstep, 255, 255-colorstep)
        for page in parmpagename[1:]:
            link = Page(request, page).link_to(request, page,
                        rel='nofollow',
                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + '*' + link
        showpagename = '   %s<BR>\n' % pagelinks
    else:
        showpagename = ''
    if calendar.firstweekday() == calendar.SUNDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, months[month-1], str(year), nextmonth, nextyear)
    if calendar.firstweekday() == calendar.MONDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;/&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, str(year), month, nextmonth, nextyear)
    restr1 = ' <tr>\n%s </tr>\n' % resth1

    r7 = range(7)
    restd2 = []
    for wkday in r7:
        wday = _(wkdays[wkday])
        if wkday in wkend:
            cssday = "cal-weekend"
        else:
            cssday = "cal-workday"
        restd2.append('  <td class="%s">%s</td>\n' % (cssday, wday))
    restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)

    if parmheight6:
        while len(monthcal) < 6:
            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]

    maketip_js = []
    restrn = []
    for week in monthcal:
        restdn = []
        for wkday in r7:
            day = week[wkday]
            if not day:
                restdn.append('  <td class="cal-invalidday">&nbsp;</td>\n')
            else:
                page = parmpagename[0]
                if anniversary:
                    link = "%s/%02d-%02d" % (page, month, day)
                else:
                    link = "%s/%4d-%02d-%02d" % (page, year, month, day)
                daypage = Page(request, link)
                if daypage.exists() and request.user.may.read(link):
                    csslink = "cal-usedday"
                    query = {}
                    r, g, b, u = (255, 0, 0, 1)
                    daycontent = daypage.get_raw_body()
                    header1_re = re.compile(r'^\s*=\s(.*)\s=$', re.MULTILINE) # re.UNICODE
                    titletext = []
                    for match in header1_re.finditer(daycontent):
                        if match:
                            title = match.group(1)
                            title = wikiutil.escape(title).replace("'", "\\'")
                            titletext.append(title)
                    tipname_unescaped = link.replace("'", "\\'")
                    link = wikiutil.escape(link).replace("'", "\\'")
                    tipname = link
                    tiptitle = link
                    tiptext = '<br>'.join(titletext)
                    maketip_js.append("maketip('%s','%s','%s');" % (tipname, tiptitle, tiptext))
                    attrs = {'onMouseOver': "tip('%s')" % tipname_unescaped,
                             'onMouseOut': "untip()"}
                else:
                    csslink = "cal-emptyday"
                    if parmtemplate:
                        query = {'action': 'edit', 'template': parmtemplate}
                    else:
                        query = {}
                    r, g, b, u = (255, 255, 255, 0)
                    if wkday in wkend:
                        csslink = "cal-weekend"
                    attrs = {'rel': 'nofollow'}
                for otherpage in parmpagename[1:]:
                    otherlink = "%s/%4d-%02d-%02d" % (otherpage, year, month, day)
                    otherdaypage = Page(request, otherlink)
                    if otherdaypage.exists():
                        csslink = "cal-usedday"
                        if u == 0:
                            r, g, b = (r-colorstep, g, b-colorstep)
                        else:
                            r, g, b = (r, g+colorstep, b)
                r, g, b = cliprgb(r, g, b)
                style = 'background-color:#%02x%02x%02x' % (r, g, b)
                fmtlink = formatter.url(1, daypage.url(request, query), csslink, **attrs) + str(day) + formatter.url(0)
                if day == currentday and month == currentmonth and year == currentyear:
                    cssday = "cal-today"
                    fmtlink = "<b>%s</b>" % fmtlink # for browser with CSS probs
                else:
                    cssday = "cal-nottoday"
                restdn.append('  <td style="%s" class="%s">%s</td>\n' % (style, cssday, fmtlink))
        restrn.append(' <tr>\n%s </tr>\n' % "".join(restdn))

    restable = '<table border="2" cellspacing="2" cellpadding="2">\n<col width="14%%" span="7">%s%s%s</table>\n'
    restable = restable % (restr1, restr2, "".join(restrn))

    if maketip_js:
        tip_js = '''<script language="JavaScript" type="text/javascript">
<!--
%s
// -->
</script>
''' % '\n'.join(maketip_js)
    else:
        tip_js = ''

    result = """\
<script type="text/javascript" src="%s/common/js/infobox.js"></script>
<div id="%s" style="position:absolute; visibility:hidden; z-index:20; top:-999em; left:0px;"></div>
%s%s
""" % (request.cfg.url_prefix_static, formatter.make_id_unique('infodiv'), tip_js, restable)
    return formatter.rawHTML(result)
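
The calendar code above calls a cliprgb() helper that is defined elsewhere in the MonthCalendar module and is not part of this excerpt. A minimal sketch of what it presumably does, assuming plain per-channel clamping so that the 'background-color:#%02x%02x%02x' format never receives an out-of-range value (the exact bounds used by the real helper may differ):

def cliprgb(r, g, b):
    """Clamp each colour channel into 0..255 (assumed behaviour)."""
    def clip(x):
        return max(0, min(x, 255))
    return clip(r), clip(g), clip(b)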
Example #53
def macro_Slider(macro,
                 pagename=u'SliderContent',
                 width=u'900px',
                 height=u'100px',
                 interval=u'3000'):
    """
    @param pagename: the pagename for the list to cycle through.
    """
    f = macro.formatter
    request = macro.request
    _ = request.getText

    if request.user.may.read(pagename):
        page = Page(request, pagename)
        raw = page.get_raw_body()
    else:
        raw = ""

    username = request.user.name or 'Anonymous'
    # this selects lines looking like a list item
    quotes = raw.splitlines()
    quotes = [quote.strip() for quote in quotes]
    quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]
    if not quotes:
        return (macro.formatter.highlight(1) +
                _('No quotes on %(pagename)s.') % {
                    'pagename': pagename
                } + macro.formatter.highlight(0))

    name = pagename.lower().replace('/', '_')

    result = []
    result.append(
        f.rawHTML(u'<script type="text/javascript" ' +
                  'src="%s/common/js/jssor.slider.min.js"></script>' %
                  request.cfg.url_prefix_static))
    result.append(
        f.rawHTML(
            u'<script>slider_%s_starter = function (id) { var options = { $AutoPlay: true, $AutoPlayInterval: %s }; var slider_%s = new $JssorSlider$(id, options); };</script>'
            % (name, interval, name)))

    result.append(
        f.rawHTML(
            u'<div id="slider_%s_container" style="position: relative; top: 0px; left: 0px; width: %s; height: %s;">'
            % (name, width, height)))
    result.append(
        f.rawHTML(
            u'<div u="slides" style="cursor: move; position: absolute; left: 0px; top: 0px; width: %s; height: %s; overflow: hidden;">'
            % (width, height)))

    for quote in quotes:
        if quote.startswith('[[') and quote.endswith(']]'):
            quote = quote[2:-2]
        page.set_raw_body(Page(request, quote).get_raw_body(), 1)
        text = request.redirectedOutput(page.send_page,
                                        content_only=1,
                                        content_id="Slider")
        result.append(f.rawHTML('<div style="visibility: hidden">'))
        result.append(f.rawHTML(text))
        result.append(f.div(0))

    result.append(f.div(0))
    result.append(
        f.rawHTML('<script>slider_' + name + '_starter("slider_' + name +
                  '_container");</script>'))
    result.append(f.div(0))

    return ''.join(result)
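
The quote handling in macro_Slider above can be exercised in isolation. A minimal sketch with a hypothetical helper name (extract_slides is not part of the macro) that reproduces the same list-item filtering and [[...]] unwrapping:

def extract_slides(raw):
    # keep only top-level list items, i.e. lines of the form "* something"
    quotes = [line.strip() for line in raw.splitlines()]
    quotes = [q[2:] for q in quotes if q.startswith('* ')]
    # unwrap MoinMoin link markup: "[[PageName]]" -> "PageName"
    return [q[2:-2] if q.startswith('[[') and q.endswith(']]') else q
            for q in quotes]

# extract_slides(u" * [[QuoteOfTheDay]]\n * plain text\n")
# -> [u'QuoteOfTheDay', u'plain text']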
import os
import codecs

from MoinMoin import config, wikiutil
from MoinMoin.request import RequestCLI
from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor

def makepage(rootdir, pagename, text):
    """quick and dirty!"""
    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
    os.makedirs(pagedir)
    
    revstr = '%08d' % 1
    cf = os.path.join(pagedir, 'current')
    open(cf, 'w').write(revstr+'\n')
    
    revdir = os.path.join(pagedir, 'revisions')
    os.makedirs(revdir)
    tf = os.path.join(revdir, revstr)
    text = text.replace("\n","\r\n")
    codecs.open(tf, 'wb', config.charset).write(text)
    
# 'url' (the wiki base URL) and 'destdir' (the target data directory) must be
# defined before this point; they are not part of this excerpt.
request = RequestCLI(url=url)
request.form = request.args = request.setup_args()

pagelist = list(request.rootpage.getPageList(user=''))
for pagename in pagelist:
    p = Page(request, pagename)
    text = p.get_raw_body()
    makepage(destdir, pagename, text)
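
The directory layout written by makepage() is the MoinMoin 1.x page store: pages/<quoted name>/current holds the current revision number and pages/<quoted name>/revisions/<number> holds the page text. A minimal sketch of the inverse operation, assuming that same layout and reusing the imports above (for illustration only):

def readpage(rootdir, pagename):
    """Return the text of the current revision written by makepage()."""
    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
    # 'current' contains the revision number, e.g. "00000001\n"
    revstr = open(os.path.join(pagedir, 'current')).read().strip()
    tf = os.path.join(pagedir, 'revisions', revstr)
    text = codecs.open(tf, 'rb', config.charset).read()
    return text.replace("\r\n", "\n")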
    

Example #55
def execute(macro, text):
    request = macro.request
    formatter = macro.formatter
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(
        time.time())
    thispage = formatter.page.page_name
    # does the URL have calendar params (i.e. somebody has clicked prev/next links in a calendar)?
    if 'calparms' in macro.request.args:
        has_calparms = 1  # yes!
        text2 = macro.request.args['calparms']
        cparmpagename, cparmyear, cparmmonth, cparmoffset, cparmoffset2, cparmheight6, cparmanniversary, cparmtemplate = \
            parseargs(request, text2, thispage, currentyear, currentmonth, 0, 0, False, False, u'')
        # Note: cparmheight6 and cparmanniversary are not used, they are just there
        # to have a consistent parameter string in calparms and macro args
    else:
        has_calparms = 0

    if text is None:  # macro call without parameters
        text = u''

    # parse and check arguments
    parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, anniversary, parmtemplate = \
        parseargs(request, text, thispage, currentyear, currentmonth, 0, 0, False, False, u'')

    # does url have calendar params and is THIS the right calendar to modify (we can have multiple
    # calendars on the same page)?
    #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):

    # move all calendars when using the navigation:
    if has_calparms and cparmpagename == parmpagename:
        year, month = yearmonthplusoffset(parmyear, parmmonth,
                                          parmoffset + cparmoffset2)
        parmoffset2 = cparmoffset2
        parmtemplate = cparmtemplate
    else:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)

    if request.isSpiderAgent and abs(currentyear - year) > 1:
        return ''  # this is a bot and it didn't follow the rules (see below)
    if currentyear == year:
        attrs = {}
    else:
        attrs = {
            'rel': 'nofollow'
        }  # otherwise even well-behaved bots will index forever

    # get the calendar
    monthcal = calendar.monthcalendar(year, month)

    # European / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    elif calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    qpagenames = '*'.join(
        [wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth,
                                                    parmoffset)
    prevlink = p.url(request,
                     querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request,
                     querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request,
                      querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request,
                      querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', **
                              attrs) + '&lt;' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', **
                              attrs) + '&gt;' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', **
                             attrs) + '&lt;&lt;' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', **
                             attrs) + '&gt;&gt;' + formatter.url(0)

    if parmpagename != [thispage]:
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)
        if steps > maxsteps:
            steps = maxsteps
        chstep = int(l / steps)
        st = 0
        while st < l:
            ch = parmpagename[0][st:st + chstep]
            r, g, b = cliprgb(r, g, b)
            link = Page(request, parmpagename[0]).link_to(
                request,
                ch,
                rel='nofollow',
                style=
                'background-color:#%02x%02x%02x;color:#000000;text-decoration:none'
                % (r, g, b))
            pagelinks = pagelinks + link
            r, g, b = (r, g + colorstep, b)
            st = st + chstep
        r, g, b = (255 - colorstep, 255, 255 - colorstep)
        for page in parmpagename[1:]:
            link = Page(request, page).link_to(
                request,
                page,
                rel='nofollow',
                style=
                'background-color:#%02x%02x%02x;color:#000000;text-decoration:none'
                % (r, g, b))
            pagelinks = pagelinks + '*' + link
        showpagename = '   %s<BR>\n' % pagelinks
    else:
        showpagename = ''
    if calendar.firstweekday() == calendar.SUNDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, months[month-1], str(year), nextmonth, nextyear)
    elif calendar.firstweekday() == calendar.MONDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;/&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, str(year), month, nextmonth, nextyear)
    restr1 = ' <tr>\n%s </tr>\n' % resth1

    r7 = range(7)
    restd2 = []
    for wkday in r7:
        wday = _(wkdays[wkday])
        if wkday in wkend:
            cssday = "cal-weekend"
        else:
            cssday = "cal-workday"
        restd2.append('  <td class="%s">%s</td>\n' % (cssday, wday))
    restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)

    if parmheight6:
        while len(monthcal) < 6:
            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]

    maketip_js = []
    restrn = []
    for week in monthcal:
        restdn = []
        for wkday in r7:
            day = week[wkday]
            if not day:
                restdn.append('  <td class="cal-invalidday">&nbsp;</td>\n')
            else:
                page = parmpagename[0]
                if anniversary:
                    link = "%s/%02d-%02d" % (page, month, day)
                else:
                    link = "%s/%4d-%02d-%02d" % (page, year, month, day)
                daypage = Page(request, link)
                if daypage.exists() and request.user.may.read(link):
                    csslink = "cal-usedday"
                    query = {}
                    r, g, b, u = (255, 0, 0, 1)
                    daycontent = daypage.get_raw_body()
                    header1_re = re.compile(r'^\s*=\s(.*)\s=$',
                                            re.MULTILINE)  # re.UNICODE
                    titletext = []
                    for match in header1_re.finditer(daycontent):
                        if match:
                            title = match.group(1)
                            title = wikiutil.escape(title).replace("'", "\\'")
                            titletext.append(title)
                    tipname_unescaped = link.replace("'", "\\'")
                    link = wikiutil.escape(link).replace("'", "\\'")
                    tipname = link
                    tiptitle = link
                    tiptext = '<br>'.join(titletext)
                    maketip_js.append("maketip('%s','%s','%s');" %
                                      (tipname, tiptitle, tiptext))
                    attrs = {
                        'onMouseOver': "tip('%s')" % tipname_unescaped,
                        'onMouseOut': "untip()"
                    }
                else:
                    csslink = "cal-emptyday"
                    if parmtemplate:
                        query = {'action': 'edit', 'template': parmtemplate}
                    else:
                        query = {}
                    r, g, b, u = (255, 255, 255, 0)
                    if wkday in wkend:
                        csslink = "cal-weekend"
                    attrs = {'rel': 'nofollow'}
                for otherpage in parmpagename[1:]:
                    otherlink = "%s/%4d-%02d-%02d" % (otherpage, year, month,
                                                      day)
                    otherdaypage = Page(request, otherlink)
                    if otherdaypage.exists():
                        csslink = "cal-usedday"
                        if u == 0:
                            r, g, b = (r - colorstep, g, b - colorstep)
                        else:
                            r, g, b = (r, g + colorstep, b)
                r, g, b = cliprgb(r, g, b)
                style = 'background-color:#%02x%02x%02x' % (r, g, b)
                fmtlink = formatter.url(1, daypage.url(request,
                                                       query), csslink, **
                                        attrs) + str(day) + formatter.url(0)
                if day == currentday and month == currentmonth and year == currentyear:
                    cssday = "cal-today"
                    fmtlink = "<b>%s</b>" % fmtlink  # for browser with CSS probs
                else:
                    cssday = "cal-nottoday"
                restdn.append('  <td style="%s" class="%s">%s</td>\n' %
                              (style, cssday, fmtlink))
        restrn.append(' <tr>\n%s </tr>\n' % "".join(restdn))

    restable = '<table border="2" cellspacing="2" cellpadding="2">\n<col width="14%%" span="7">%s%s%s</table>\n'
    restable = restable % (restr1, restr2, "".join(restrn))

    if maketip_js:
        tip_js = '''<script language="JavaScript" type="text/javascript">
<!--
%s
// -->
</script>
''' % '\n'.join(maketip_js)
    else:
        tip_js = ''

    result = """\
<script type="text/javascript" src="%s/common/js/infobox.js"></script>
<div id="%s" style="position:absolute; visibility:hidden; z-index:20; top:-999em; left:0px;"></div>
%s%s
""" % (request.cfg.url_prefix_static, formatter.make_id_unique('infodiv'),
       tip_js, restable)
    return formatter.rawHTML(result)
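
This example relies on a yearmonthplusoffset() helper from the MonthCalendar module that is not shown in the excerpt. A minimal sketch of the month arithmetic it has to perform, assuming it simply normalises the shifted month back into the range 1..12 while carrying over- and underflow into the year:

def yearmonthplusoffset(year, month, offset):
    """Shift (year, month) by offset months (assumed implementation)."""
    month += offset
    while month < 1:
        month += 12
        year -= 1
    while month > 12:
        month -= 12
        year += 1
    return year, month

# yearmonthplusoffset(2024, 1, -1) -> (2023, 12)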
Example #56
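# Note: args_re and title_re default to module-level patterns
# (_args_re_pattern and _title_re) defined earlier in the Include macro
# module; those definitions are not part of this excerpt.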
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M)):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = text and args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text, ))

    # prepare including page
    result = []
    print_mode = request.action in ("print", "format")
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if inc_name in this_page._macroInclude_pagelist:
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name, ))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        if not inc_page.exists():
            continue
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name

        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title()
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                url = inc_page.url(request)
                result.extend([
                    macro.formatter.heading(1, level, id=heading),
                    macro.formatter.url(1, url, css="include-heading-link"),
                    macro.formatter.text(heading),
                    macro.formatter.url(0),
                    macro.formatter.heading(0, level),
                ])

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            inc_page.send_page(content_only=True,
                               omit_footnotes=True,
                               count_hit=False)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                macro.formatter.div(1, css_class="include-link"),
                inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                macro.formatter.div(0),
            ])
        # XXX page.link_to is wrong now, it escapes the edit_icon html as it escapes normal text

    # return include text
    return ''.join(result)
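
The "from"/"to" handling in the Include macro above boils down to a small slicing routine. A minimal standalone sketch of the same logic (the name partial_include is hypothetical), including the fallback to a literal match when the argument is not a valid regular expression:

import re

def partial_include(body, from_re=None, to_re=None):
    """Return the slice of body between the from/to markers."""
    from_pos, to_pos = 0, -1
    if from_re:
        try:
            match = re.compile(from_re, re.M).search(body)
        except re.error:
            match = re.compile(re.escape(from_re), re.M).search(body)
        if match:
            from_pos = match.end()
    if to_re:
        try:
            match = re.compile(to_re, re.M).search(body, from_pos)
        except re.error:
            match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
        if match:
            to_pos = match.start()
    return body[from_pos:to_pos]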