Example #1
def findMatches(pagename, request, s_re=None, e_re=None):
    """ Find like pages

    @param pagename: name to match
    @param request: current request
    @param s_re: start re for wiki matching
    @param e_re: end re for wiki matching
    @rtype: tuple
    @return: start word, end word, matches dict
    """
    # Get full list of pages, with no filtering - very fast. We will
    # first search for like pages, then filter the results.
    pages = request.rootpage.getPageList(user='', exists='')

    # Remove current page
    try:
        pages.remove(pagename)
    except ValueError:
        pass

    # Get matches using wiki way, start and end of word
    start, end, matches = wikiMatches(pagename,
                                      pages,
                                      start_re=s_re,
                                      end_re=e_re)

    # Get the best 10 close matches
    close_matches = {}
    found = 0
    for name in closeMatches(pagename, pages):
        # Skip names already in matches
        if name in matches:
            continue

        # Filter deleted pages or pages the user can't read
        page = Page(request, name)
        if page.exists() and request.user.may.read(name):
            close_matches[name] = 8
            found += 1
            # Stop after 10 matches
            if found == 10:
                break

    # Filter deleted pages or pages the user can't read from
    # matches. Order is important!
    for name in matches.keys():  # we need .keys() because we modify the dict
        page = Page(request, name)
        if not (page.exists() and request.user.may.read(name)):
            del matches[name]

    # Finally, merge both dicts
    matches.update(close_matches)

    return start, end, matches
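
The existence-plus-ACL test in this example recurs in most of the snippets below. As a minimal sketch, the check could be factored into a helper like the following; the helper name is ours, while Page and request.user.may.read are the MoinMoin objects used above:

from MoinMoin.Page import Page

def filter_visible(request, names):
    """Keep only the pages that exist and that the current user may read."""
    visible = []
    for name in names:
        page = Page(request, name)
        if page.exists() and request.user.may.read(name):
            visible.append(name)
    return visible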
Example #2
def findMatches(pagename, request, s_re=None, e_re=None,):
    """ Find like pages
    
    @param pagename: name to match
    @param request: current request
    @param s_re: start re for wiki matching
    @param e_re: end re for wiki matching
    @rtype: tuple
    @return: start word, end word, matches dict
    """
    # Get full list of pages, with no filtering - very fast. We will
    # first search for like pages, then filter the results.
    pages = request.rootpage.getPageList(user='', exists='')

    # Remove current page
    try:
        pages.remove(pagename)
    except ValueError:
        pass

    # Get matches using wiki way, start and end of word
    start, end, matches = wikiMatches(pagename, pages, start_re=s_re,
                                      end_re=e_re)

    # Get the best 10 close matches using difflib
    close_matches = {}
    found = 0
    for name in closeMatches(pagename, pages):
        # Skip names already in matches
        if name in matches:
            continue

        # Filter deleted pages or pages the user can't read
        page = Page(request, name)
        if page.exists() and request.user.may.read(name):
            close_matches[name] = 8
            found += 1
            # Stop after 10 matches
            if found == 10:
                break
    
    # Filter deleted pages or pages the user can't read from
    # matches. Order is important!
    for name in matches.keys():
        page = Page(request, name)
        if not (page.exists() and request.user.may.read(name)):
            del matches[name]    

    # Finally, merge both dicts
    matches.update(close_matches)

    return start, end, matches
Example #3
    def _load_dict(self):
        request = self.request
        dict_name = self.name

        page = Page(request, dict_name)
        if page.exists():
            arena = "pagedicts"
            key = wikiutil.quoteWikinameFS(dict_name)
            cache = caching.CacheEntry(request, arena, key, scope="wiki", use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is uptodate
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not uptodate: recreate it
                d = super(WikiDict, self)._load_dict()
                cache.update(d)
                return d
        else:
            raise DictDoesNotExistError(dict_name)
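
The up-to-date check above has to cope with cache.mtime() returning a float while the page timestamp is an integer, so both sides are truncated with int() before comparing. A standalone sketch of the same stale-cache test using only the standard library (the paths and function name are hypothetical):

import os

def cache_is_fresh(cache_path, source_path):
    """Return True if the cached file is strictly newer than its source.
    Both mtimes are truncated to whole seconds, mirroring the granularity
    workaround in _load_dict above."""
    try:
        cache_mtime = os.path.getmtime(cache_path)
        source_mtime = os.path.getmtime(source_path)
    except OSError:
        # a missing cache or source counts as stale
        return False
    return int(cache_mtime) > int(source_mtime)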
Example #4
 def include(self, name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
     # content contains the included file name
     
     _ = self.request.getText
     
     if len(content):
         if content[0] in self.included_documents:
             lines = [_("**Duplicate included files are not permitted**")]
             state_machine.insert_input(lines, 'MoinDirectives')
             return
         self.included_documents.append(content[0])
         page = Page(page_name = content[0], request = self.request)
         if page.exists():
             text = page.get_raw_body()
             lines = text.split('\n')
             # Remove the "#format rst" line
             if lines[0].startswith("#format"):
                 del lines[0]
         else:
             lines = [_("**Could not find the referenced page: %s**") % (content[0],)]
         # Insert the text from the included document and then continue
         # parsing
         state_machine.insert_input(lines, 'MoinDirectives')
     return
Example #5
def move_file(request, pagename, new_pagename, attachment, new_attachment):
    _ = request.getText

    newpage = Page(request, new_pagename)
    if newpage.exists(includeDeleted=1) and request.user.may.write(new_pagename) and request.user.may.delete(pagename):
        new_attachment_path = os.path.join(getAttachDir(request, new_pagename,
                              create=1), new_attachment).encode(config.charset)
        attachment_path = os.path.join(getAttachDir(request, pagename),
                          attachment).encode(config.charset)

        if os.path.exists(new_attachment_path):
            upload_form(pagename, request,
                msg=_("Attachment '%(new_pagename)s/%(new_filename)s' already exists.") % {
                    'new_pagename': new_pagename,
                    'new_filename': new_attachment})
            return

        if new_attachment_path != attachment_path:
            # move file
            filesys.rename(attachment_path, new_attachment_path)
            _addLogEntry(request, 'ATTDEL', pagename, attachment)
            _addLogEntry(request, 'ATTNEW', new_pagename, new_attachment)
            upload_form(pagename, request,
                        msg=_("Attachment '%(pagename)s/%(filename)s' moved to '%(new_pagename)s/%(new_filename)s'.") % {
                            'pagename': pagename,
                            'filename': attachment,
                            'new_pagename': new_pagename,
                            'new_filename': new_attachment})
        else:
            upload_form(pagename, request, msg=_("Nothing changed"))
    else:
        upload_form(pagename, request, msg=_("Page '%(new_pagename)s' does not exist or you don't have enough rights.") % {
            'new_pagename': new_pagename})
Example #6
    def addTrail(self, pagename):
        """
        Add page to trail.
        
        @param pagename: the page name to add to the trail
        """
        if self.valid and (self.show_page_trail or self.remember_last_visit):
            # load trail if not known
            self.getTrail()      
            
            # don't append tail to trail ;)
            if self._trail and self._trail[-1] == pagename: return

            # Add only existing pages that the user may read
            if self._request:
                from MoinMoin.Page import Page
                page = Page(self._request, pagename)
                if not (page.exists() and
                        self._request.user.may.read(page.page_name)):
                    return

            # append new page, limiting the length
            self._trail = filter(lambda p, pn=pagename: p != pn, self._trail)
            self._trail = self._trail[-(self._cfg.trail_size-1):]
            self._trail.append(pagename)

            # save new trail
            trailfile = codecs.open(self.__filename() + ".trail", "w", config.charset)
            for t in self._trail:
                trailfile.write('%s\n' % t)
            trailfile.close()
            try:
                os.chmod(self.__filename() + ".trail", 0666 & config.umask)
            except OSError:
                pass
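
The filter(lambda p, pn=pagename: p != pn, self._trail) line keeps every trail entry except the page being appended. That idiom relies on Python 2's filter() returning a list; a list comprehension expresses the same pruning and also works on Python 3. A small self-contained sketch, with function and argument names of our own choosing:

def prune_trail(trail, pagename, trail_size):
    """Drop earlier occurrences of pagename, cap the trail length, then
    append the page, mirroring the filter/slice/append sequence above."""
    trail = [p for p in trail if p != pagename]
    trail = trail[-(trail_size - 1):]
    trail.append(pagename)
    return trail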
Example #7
 def xmlrpc_getProcessingInstructionVersion(self, pagename, rev, pi):
     """
     Returns value of Processing Instruction
     @param pagename: pagename (utf-8)
     @param rev: revision number (int)
     @param pi: PI key
     """
     # User may read page?
     if not self.request.user.may.read(pagename):
         return self.notAllowedFault()
     if rev is not None:
         page = Page(self.request, pagename, rev=rev)
     else:
         page = Page(self.request, pagename)
     if not page.exists():
         return xmlrpclib.Fault("NOT_EXIST", "Page does not exist.")
     if pi is None:
         return xmlrpclib.Fault("NOT_EXIST",
                                "Processing Instruction not given.")
     try:
         instruction = page.pi[pi]
     except KeyError:
         return xmlrpclib.Fault("NOT_EXIST",
                                "Processing Instruction does not exist.")
     return self._outstr(instruction)
Example #8
    def include(self, name, arguments, options, content, lineno,
                content_offset, block_text, state, state_machine):
        # content contains the included file name

        _ = self.request.getText

        if len(content):
            if content[0] in self.included_documents:
                lines = [_("**Duplicate included files are not permitted**")]
                state_machine.insert_input(lines, 'MoinDirectives')
                return
            self.included_documents.append(content[0])
            page = Page(page_name=content[0], request=self.request)
            if page.exists():
                text = page.get_raw_body()
                lines = text.split('\n')
                # Remove the "#format rst" line
                if lines[0].startswith("#format"):
                    del lines[0]
            else:
                lines = [
                    _("**Could not find the referenced page: %s**") %
                    (content[0], )
                ]
            # Insert the text from the included document and then continue
            # parsing
            state_machine.insert_input(lines, 'MoinDirectives')
        return
Example #9
    def xmlrpc_listLinks(self, pagename):
        """
        list links for a given page
        @param pagename: the page name
        @rtype: list
        @return: links of the page, structs, with the following elements
            * name (string) : The page name or URL the link is to, UTF-8 encoding.
            * type (int) : The link type. Zero (0) for internal Wiki
              link, one (1) for external link (URL - image link, whatever).
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()
        
        links_out = []
        for link in page.getPageLinks(self.request):
            links_out.append({ 'name': self._outstr(link), 'type': 0 })
        return links_out
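
On the wire the xmlrpc_ prefix is stripped, so clients call this method as listLinks. A hedged client-side sketch using only the standard library; the wiki URL is a placeholder, and it assumes the wiki exposes the usual ?action=xmlrpc2 endpoint:

import xmlrpclib  # xmlrpc.client on Python 3

wiki = xmlrpclib.ServerProxy("http://wiki.example.org/?action=xmlrpc2")
for link in wiki.listLinks("FrontPage"):
    # each struct carries 'name' and 'type', as documented in the docstring above
    print("%s (type %s)" % (link["name"], link["type"]))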
Example #10
File: CustomTag.py  Project: happytk/moin
def find_by_re (request, formatter, re_exp, rpl_text, search):

    ret = []

    if not hasattr(request, "_Include_backto"):
        request._Include_backto = formatter.page.page_name

    if search == None:
        pages = getPageListFromLog(request)
        ret.append('Results from recent-log(%d pages):' % len(pages))
    else:
        pages = getPageListFromSearch(request,search)
        ret.append('Results from searching by __%s__(%d pages):' % (search, len(pages)))

    task_re = re.compile(re_exp, re.UNICODE|re.DOTALL)

    for pagename in pages:

        if pagename == formatter.page.page_name: continue

        page = Page(request, pagename)
        if not page.exists(): continue

        ret.append(findtext_by_re(request, task_re, re_exp, rpl_text, page))

    return wikiutil.renderText(request, WikiParser, '\n'.join(ret))#wikiutil.escape('\n'.join(ret)))
Example #11
    def xmlrpc_listLinks(self, pagename):
        """
        list links for a given page
        @param pagename: the page name
        @rtype: list
        @return: links of the page, structs, with the following elements
            * name (string) : The page name or URL the link is to, UTF-8 encoding.
            * type (int) : The link type. Zero (0) for internal Wiki
              link, one (1) for external link (URL - image link, whatever).
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        links_out = []
        for link in page.getPageLinks(self.request):
            links_out.append({'name': self._outstr(link), 'type': 0})
        return links_out
Example #12
    def _load_group(self):
        request = self.request
        group_name = self.name

        page = Page(request, group_name)
        if page.exists():
            arena = 'pagegroups'
            key = wikiutil.quoteWikinameFS(group_name)
            cache = caching.CacheEntry(request, arena, key, scope='wiki', use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is uptodate
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not uptodate: recreate it
                members, member_groups = super(WikiGroup, self)._load_group()
                cache.update((members, member_groups))
                return members, member_groups
        else:
            raise GroupDoesNotExistError(group_name)
Example #13
File: text_rst.py  Project: aahlad/soar
    def include(self, name, arguments, options, content, lineno,
                content_offset, block_text, state, state_machine):
        # content contains the included file name

        _ = self.request.getText

        # Limit the number of documents that can be included
        if self.num_includes < self.max_includes:
            self.num_includes += 1
        else:
            lines = [_("**Maximum number of allowed includes exceeded**")]
            state_machine.insert_input(lines, 'MoinDirectives')
            return

        if len(content):
            pagename = content[0]
            page = Page(page_name=pagename, request=self.request)
            if not self.request.user.may.read(pagename):
                lines = [_("**You are not allowed to read the page: %s**") % (pagename, )]
            else:
                if page.exists():
                    text = page.get_raw_body()
                    lines = text.split('\n')
                    # Remove the "#format rst" line
                    if lines[0].startswith("#format"):
                        del lines[0]
                else:
                    lines = [_("**Could not find the referenced page: %s**") % (pagename, )]
            # Insert the text from the included document and then continue parsing
            state_machine.insert_input(lines, 'MoinDirectives')
        return
Example #14
    def save(self, editor, newtext, rev, **kw):
        request = self.request
        username = request.user.name
        pagename = editor.page_name

        if grouppage_autocreate and username == pagename:
            # create group pages when a user saves his own homepage
            for page in grouppages:
                grouppagename = "%s/%s" % (username, page)
                grouppage = Page(request, grouppagename)
                if not grouppage.exists():
                    text = """\
#acl %(username)s:read,write,delete,revert
 * %(username)s
""" % locals()
                    editor = PageEditor(request, grouppagename)
                    editor._write_file(text)

        parts = pagename.split('/')
        if len(parts) == 2:
            subpage = parts[1]
            if subpage in grouppages and not self.admin(pagename):
                return False

        # No problem to save if my base class agrees
        return Permissions.save(self, editor, newtext, rev, **kw)
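
A save() override like this normally lives in a site's security policy; by MoinMoin convention that is a class named SecurityPolicy in wikiconfig.py that subclasses Permissions. The skeleton below is our own illustration of that wiring, with grouppage_autocreate and grouppages shown as the module-level settings the example assumes, not the project's actual configuration:

from MoinMoin.security import Permissions

grouppage_autocreate = True
grouppages = ['FriendsGroup', 'EnemiesGroup']   # hypothetical subpage names

class SecurityPolicy(Permissions):
    def save(self, editor, newtext, rev, **kw):
        # ... homepage/group-page checks as in the example above ...
        return Permissions.save(self, editor, newtext, rev, **kw)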
Example #15
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        cnt = 0
        script = [packLine(['MoinMoinPackage', '1']), ]

        for pagename in pagelist:
            pagename = pagename.strip()
            page = Page(request, pagename)
            if page.exists():
                cnt += 1
                script.append(packLine([function, str(cnt), pagename]))
                timestamp = wikiutil.version2timestamp(page.mtime_usecs())
                zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
                zi.compress_type = COMPRESSION_LEVEL
                zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            else:
                #import sys
                #print >>sys.stderr, "Could not find the page %s." % pagename.encode("utf-8")
                pass

        script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #16
def execute(pagename, request):
    _ = request.getText
    sub_page_name = request.cfg.supplementation_page_name
    sub_page_template = request.cfg.supplementation_page_template
    newpagename = "%s/%s" % (pagename, sub_page_name)
    errormsg = _("You are not allowed to create the supplementation page.")

    if pagename.endswith(sub_page_name):  # sub_sub_page redirects to sub_page
        query = {}
        url = Page(request, pagename).url(request, query)
        request.http_redirect(url)
    elif request.user.may.read(newpagename):
        query = {}
        url = Page(request, newpagename).url(request, query)
        test = Page(request, newpagename)
        if test.exists():  # page is defined -> redirect
            request.http_redirect(url)
        elif request.user.may.write(
                newpagename):  # page will be created from template
            query = {
                'action': 'edit',
                'backto': newpagename,
                'template': quoteWikinameURL(sub_page_template)
            }
            url = Page(request, newpagename).url(request, query)
            request.http_redirect(url)
        else:
            request.theme.add_msg(errormsg, "error")
    else:
        request.theme.add_msg(errormsg, "error")
Example #17
    def xmlrpc_getPageVersion(self, pagename, rev):
        """ Get raw text from specific revision of pagename
        
        @param pagename: pagename (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: utf-8 encoded page data
        """    
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev != None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Return page raw text
        if self.version == 2:
            return self._outstr(page.get_raw_body())
        elif self.version == 1:
            return self._outlob(page.get_raw_body())
Example #18
    def save(self, editor, newtext, rev, **kw):
        request = self.request
        username = request.user.name
        pagename = editor.page_name

        if grouppage_autocreate and username == pagename:
            # create group pages when a user saves his own homepage
            for page in grouppages:
                grouppagename = "%s/%s" % (username, page)
                grouppage = Page(request, grouppagename)
                if not grouppage.exists():
                    text = """\
#acl %(username)s:read,write,delete,revert
 * %(username)s
""" % locals()
                    editor = PageEditor(request, grouppagename)
                    editor._write_file(text)

        parts = pagename.split('/')
        if len(parts) == 2:
            subpage = parts[1]
            if subpage in grouppages and not self.admin(pagename):
                return False

        # No problem to save if my base class agrees
        return Permissions.save(self, editor, newtext, rev, **kw)
Example #19
    def xmlrpc_getPageVersion(self, pagename, rev):
        """
        Get raw text from specific revision of pagename

        @param pagename: pagename (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: utf-8 encoded page data
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Return page raw text
        if self.version == 2:
            return self._outstr(page.get_raw_body())
        elif self.version == 1:
            return self._outlob(page.get_raw_body())
Example #20
    def xmlrpc_getPageHTMLVersion(self, pagename, rev):
        """
        Get HTML from a specific revision of pagename

        @param pagename: the page name (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: page in rendered HTML (utf-8)
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Render page into a buffer
        result = self.request.redirectedOutput(page.send_page, content_only=1)

        # Return rendered page
        if self.version == 2:
            return self._outstr(result)
        elif self.version == 1:
            return xmlrpclib.Binary(result)
Example #21
    def _load_group(self):
        request = self.request
        group_name = self.name

        page = Page(request, group_name)
        if page.exists():
            arena = 'pagegroups'
            key = wikiutil.quoteWikinameFS(group_name)
            cache = caching.CacheEntry(request,
                                       arena,
                                       key,
                                       scope='wiki',
                                       use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is uptodate
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not uptodate: recreate it
                members, member_groups = super(WikiGroup, self)._load_group()
                cache.update((members, member_groups))
                return members, member_groups
        else:
            raise GroupDoesNotExistError(group_name)
Example #22
    def xmlrpc_getPageHTMLVersion(self, pagename, rev):
        """
        Get HTML from a specific revision of pagename

        @param pagename: the page name (utf-8)
        @param rev: revision number (int)
        @rtype: str
        @return: page in rendered HTML (utf-8)
        """
        pagename = self._instr(pagename)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pagename, rev=rev)
        else:
            page = Page(self.request, pagename)

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Render page into a buffer
        result = self.request.redirectedOutput(page.send_page, content_only=1)

        # Return rendered page
        if self.version == 2:
            return self._outstr(result)
        elif self.version == 1:
            return xmlrpclib.Binary(result)
Example #23
def execute(pagename, request):
    _ = request.getText
    sub_page_name = request.cfg.supplementation_page_name
    sub_page_template = request.cfg.supplementation_page_template
    newpagename = "%s/%s" % (pagename, sub_page_name)
    errormsg = _("You are not allowed to create the supplementation page.")

    if pagename.endswith(sub_page_name): # sub_sub_page redirects to sub_page
        query = {}
        url = Page(request, pagename).url(request, query)
        request.http_redirect(url)
    elif request.user.may.read(newpagename):
        query = {}
        url = Page(request, newpagename).url(request, query)
        test = Page(request, newpagename)
        if test.exists(): # page is defined -> redirect
            request.http_redirect(url)
        elif request.user.may.write(newpagename):  # page will be created from template
            query = {'action': 'edit', 'backto': newpagename, 'template': quoteWikinameURL(sub_page_template)}
            url = Page(request, newpagename).url(request, query)
            request.http_redirect(url)
        else:
            request.theme.add_msg(errormsg, "error")
    else:
        request.theme.add_msg(errormsg, "error")
Example #24
    def addTrail(self, pagename):
        """
        Add page to trail.
        
        @param pagename: the page name to add to the trail
        """
        # TODO: acquire lock here, so multiple processes don't clobber
        # each other's trail.

        if self.valid and (self.show_page_trail or self.remember_last_visit):
            # load trail if not known
            self.getTrail()      
            
            # don't append tail to trail ;)
            if self._trail and self._trail[-1] == pagename: return

            # Add only existing pages that the user may read
            if self._request:
                from MoinMoin.Page import Page
                page = Page(self._request, pagename)
                if not (page.exists() and
                        self._request.user.may.read(page.page_name)):
                    return

            # append new page, limiting the length
            self._trail = filter(lambda p, pn=pagename: p != pn, self._trail)
            self._trail = self._trail[-(self._cfg.trail_size-1):]
            self._trail.append(pagename)
            self.saveTrail()
Example #25
class PackagePages:
    def __init__(self, pagename, request):
        self.request = request
        self.pagename = pagename
        self.page = Page(request, pagename)

    def allowed(self):
        """ Check if user is allowed to do this. """
        return not self.__class__.__name__ in self.request.cfg.actions_excluded

    def render(self):
        """ Render action

        This action returns a wiki page with optional message, or
        redirects to new page.
        """
        _ = self.request.getText

        if 'cancel' in self.request.values:
            # User canceled
            return self.page.send_page()

        try:
            if not self.allowed():
                self.request.theme.add_msg(_('You are not allowed to edit this page.'), "error")
                raise ActionError
            elif not self.page.exists():
                self.request.theme.add_msg(_('This page is already deleted or was never created!'))
                raise ActionError

            self.package()
        except ActionError, e:
            return self.page.send_page()
Example #26
class PackagePages:
    def __init__(self, pagename, request):
        self.request = request
        self.pagename = pagename
        self.page = Page(request, pagename)

    def allowed(self):
        """ Check if user is allowed to do this. """
        return not self.__class__.__name__ in self.request.cfg.actions_excluded

    def render(self):
        """ Render action

        This action returns a wiki page with optional message, or
        redirects to new page.
        """
        _ = self.request.getText

        if 'cancel' in self.request.values:
            # User canceled
            return self.page.send_page()

        try:
            if not self.allowed():
                self.request.theme.add_msg(
                    _('You are not allowed to edit this page.'), "error")
                raise ActionError
            elif not self.page.exists():
                self.request.theme.add_msg(
                    _('This page is already deleted or was never created!'))
                raise ActionError

            self.package()
        except ActionError, e:
            return self.page.send_page()
Example #27
    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1), f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1), f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        from MoinMoin.support.python_compatibility import hash_new
        digest = hash_new('sha1',
                          page.get_raw_body().encode(
                              config.charset)).hexdigest().upper()
        request.write(
            f.paragraph(1),
            f.rawHTML(
                '%(label)s <tt>%(value)s</tt>' % {
                    'label': _("SHA digest of this page's content is:"),
                    'value': digest,
                }), f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request,
                                          include_self=1,
                                          return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(
                f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(
                            f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(
                    f.rawHTML("%s%s " %
                              (Page(request, linkedpage).link_to(request),
                               ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))
Example #28
File: PageHits.py  Project: imosts/flume
 def filterReadableHits(self, hits):
     """ Filter out hits the user many not see """
     userMayRead = self.request.user.may.read
     for pagename in hits.keys():
         page = Page(self.request, pagename)
         if page.exists() and userMayRead(pagename):
             continue
         del hits[pagename]
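
Deleting from hits inside the loop is safe here only because hits.keys() returns a fresh list on Python 2; on Python 3 the same loop would raise RuntimeError for mutating the dict while iterating over it. A version of the filter that behaves the same on both (the function name is ours):

from MoinMoin.Page import Page

def filter_readable_hits(request, hits):
    """Remove hits for pages that are deleted or that the user may not read.
    list(hits) snapshots the keys so the dict can be modified inside the loop."""
    user_may_read = request.user.may.read
    for pagename in list(hits):
        page = Page(request, pagename)
        if not (page.exists() and user_may_read(pagename)):
            del hits[pagename]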
Example #29
    def xmlrpc_getPageInfoVersion(self, pagename, rev):
        """
        Return page information for specific revision

        @param pagename: the name of the page (utf-8)
        @param rev: revision to get info about (int)
        @rtype: dict
        @return: page information
            * name (string): the canonical page name, UTF-8.
            * lastModified (date): Last modification date, UTC.
            * author (string): author name, UTF-8.
            * version (int): current version

        """
        pn = self._instr(pagename)

        # User may read this page?
        if not self.request.user.may.read(pn):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pn, rev=rev)
        else:
            page = Page(self.request, pn)
            rev = page.current_rev()

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Get page info
        edit_info = page.edit_info()
        if not edit_info:
            return self.noLogEntryFault()

        mtime = wikiutil.version2timestamp(long(
            edit_info['timestamp']))  # must be long for py 2.2.x
        gmtuple = tuple(time.gmtime(mtime))

        version = rev  # our new rev numbers: 1,2,3,4,....

        #######################################################################
        # BACKWARDS COMPATIBILITY CODE - remove when 1.2.x is regarded stone age
        # as we run a feed for BadContent on MoinMaster, we want to stay
        # compatible here for a while with 1.2.x moins asking us for BadContent
        # 1.3 uses the lastModified field for checking for updates, so it
        # should be no problem putting the old UNIX timestamp style of version
        # number in the version field
        if self.request.cfg.sitename == 'MoinMaster' and pagename == 'BadContent':
            version = int(mtime)
        #######################################################################

        return {
            'name': self._outstr(page.page_name),
            'lastModified': xmlrpclib.DateTime(gmtuple),
            'author': self._outstr(edit_info['editor']),
            'version': version,
        }
Example #30
    def collectpackage(self, pagelist, fileobject, pkgname="", include_attachments=False):
        """ Expects a list of pages as an argument, and fileobject to be an open
        file object, which a zipfile will get written to.

        @param pagelist: pages to package
        @param fileobject: open file object to write to
        @param pkgname: optional file name, to prevent self packaging
        @rtype: string or None
        @return: error message, if one happened
        @rtype: boolean
        @param include_attachments: True if you want attachments collected
        """
        _ = self.request.getText
        COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED

        pages = []
        for pagename in pagelist:
            pagename = wikiutil.normalize_pagename(pagename, self.request.cfg)
            if pagename:
                page = Page(self.request, pagename)
                if page.exists() and self.request.user.may.read(pagename):
                    pages.append(page)
        if not pages:
            return (_('No pages like "%s"!') % wikiutil.escape(pagelist))

        # Set zipfile output
        zf = zipfile.ZipFile(fileobject, "w", COMPRESSION_LEVEL)

        cnt = 0
        userid = user.getUserIdentification(self.request)
        script = [packLine(['MoinMoinPackage', '1']), ]

        for page in pages:
            cnt += 1
            files = _get_files(self.request, page.page_name)
            script.append(packLine(["AddRevision", str(cnt), page.page_name, userid, "Created by the PackagePages action."]))

            timestamp = wikiutil.version2timestamp(page.mtime_usecs())

            # avoid getting strange exceptions from zipfile in case of pre-1980 timestamps
            nineteeneighty = (10 * 365 + 3) * 24 * 3600 # 1970 + 10y + 3d
            timestamp = max(nineteeneighty, timestamp) # zip can not store timestamps before 1980

            zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            if include_attachments:
                for attname in files:
                    if attname != pkgname:
                        cnt += 1
                        zipname = "%d_attachment" % cnt
                        script.append(packLine(["AddAttachment", zipname, attname, page.page_name, userid, "Created by the PackagePages action."]))
                        filename = AttachFile.getFilename(self.request, page.page_name, attname)
                        zf.write(filename, zipname)
        script += [packLine(['Print', 'Thank you for using PackagePages!'])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
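
collectpackage() only needs a list of page names and a writable file object, so an in-memory buffer is enough to build a package. A hedged usage sketch: the PackagePages constructor matches the earlier examples, while the page names and error handling here are purely illustrative.

from StringIO import StringIO   # io.BytesIO on Python 3

buf = StringIO()
packager = PackagePages(pagename, request)
error = packager.collectpackage([u'FrontPage', u'HelpContents'], buf,
                                include_attachments=False)
if error:
    request.theme.add_msg(error, "error")
else:
    zip_data = buf.getvalue()   # the finished zip, ready to send or store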
Example #31
def load_template(request, name):
    if not request.user.may.read(name):
        raise InviteException("You are not allowed to read template page '%s'." % name)

    page = Page(request, name)
    if not page.exists():
        raise InviteException("Template page '%s' does not exist." % name)

    return page.get_raw_body()
Example #32
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed
    actions.
    """
    _ = context.getText
    cfg = context.cfg

    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        if '_' in pagename and not context.page.exists():
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)

    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
                'action_name': wikiutil.escape(action_name), }

    # Disallow non available actions
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
                'action_name': wikiutil.escape(action_name), }
        if not context.user.valid:
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")

    if msg:
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            msg = _("You are not allowed to do %(action_name)s on this page.") % {
                    'action_name': wikiutil.escape(action_name), }
            if not context.user.valid:
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)

    return context
Example #33
    def xmlrpc_getPageInfoVersion(self, pagename, rev):
        """
        Return page information for specific revision

        @param pagename: the name of the page (utf-8)
        @param rev: revision to get info about (int)
        @rtype: dict
        @return: page information
            * name (string): the canonical page name, UTF-8.
            * lastModified (date): Last modification date, UTC.
            * author (string): author name, UTF-8.
            * version (int): current version

        """
        pn = self._instr(pagename)

        # User may read this page?
        if not self.request.user.may.read(pn):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pn, rev=rev)
        else:
            page = Page(self.request, pn)
            rev = page.current_rev()

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Get page info
        edit_info = page.edit_info()
        if not edit_info:
            return self.noLogEntryFault()

        mtime = wikiutil.version2timestamp(long(edit_info['timestamp'])) # must be long for py 2.2.x
        gmtuple = tuple(time.gmtime(mtime))

        version = rev # our new rev numbers: 1,2,3,4,....

        #######################################################################
        # BACKWARDS COMPATIBILITY CODE - remove when 1.2.x is regarded stone age
        # as we run a feed for BadContent on MoinMaster, we want to stay
        # compatible here for a while with 1.2.x moins asking us for BadContent
        # 1.3 uses the lastModified field for checking for updates, so it
        # should be no problem putting the old UNIX timestamp style of version
        # number in the version field
        if self.request.cfg.sitename == 'MoinMaster' and pagename == 'BadContent':
            version = int(mtime)
        #######################################################################

        return {
            'name': self._outstr(page.page_name),
            'lastModified': xmlrpclib.DateTime(gmtuple),
            'author': self._outstr(edit_info['editor']),
            'version': version,
            }
Example #34
File: wsgiapp.py  Project: happytk/moin
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed
    actions.
    """
    _ = context.getText
    cfg = context.cfg

    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        if '_' in pagename and not context.page.exists():
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)

    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
                'action_name': wikiutil.escape(action_name), }

    # Disallow non available actions
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
                'action_name': wikiutil.escape(action_name), }
        if not context.user.valid:
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")

    if msg:
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            msg = _("You are not allowed to do %(action_name)s on this page.") % {
                    'action_name': wikiutil.escape(action_name), }
            if not context.user.valid:
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)

    return context
Example #35
 def filterReadableHits(self, hits):
     """ Filter out hits the user many not see """
     userMayRead = self.request.user.may.read
     for pagename in hits.keys():  # we need .keys() because we modify the dict
         page = Page(self.request, pagename)
         if page.exists() and userMayRead(pagename):
             continue
         del hits[pagename]
Example #36
def load_template(request, name):
    if not request.user.may.read(name):
        raise InviteException(
            "You are not allowed to read template page '%s'." % name)

    page = Page(request, name)
    if not page.exists():
        raise InviteException("Template page '%s' does not exist." % name)

    return page.get_raw_body()
Example #37
    def _index_page(self, request, connection, pagename, mode="update"):
        """ Index a page.

        Index all revisions (if wanted by configuration) and all attachments.

        @param request: request suitable for indexing
        @param connection: the Indexer connection object
        @param pagename: a page name
        @param mode: 'add' = just add, no checks
                     'update' = check if already in index and update if needed (mtime)
        """
        page = Page(request, pagename)
        revlist = page.getRevList()  # recent revs first, does not include deleted revs
        logging.debug("indexing page %r, %d revs found" % (pagename, len(revlist)))

        if not revlist:
            # we have an empty revision list, that means the page is not there any more,
            # likely it (== all of its revisions, all of its attachments) got either renamed or nuked
            wikiname = request.cfg.interwikiname or u"Self"

            sc = self.get_search_connection()
            docs_to_delete = sc.get_all_documents_with_fields(wikiname=wikiname, pagename=pagename)
            # any page rev, any attachment
            sc.close()

            for doc in docs_to_delete:
                connection.delete(doc.id)
            logging.debug("page %s (all revs, all attachments) removed from xapian index" % pagename)

        else:
            if request.cfg.xapian_index_history:
                index_revs, remove_revs = revlist, []
            else:
                if page.exists():  # is current rev not deleted?
                    index_revs, remove_revs = revlist[:1], revlist[1:]
                else:
                    index_revs, remove_revs = [], revlist

            for revno in index_revs:
                updated = self._index_page_rev(request, connection, pagename, revno, mode=mode)
                logging.debug("updated page %r rev %d (updated==%r)" % (pagename, revno, updated))
                if not updated:
                    # we reached the revisions that are already present in the index
                    break

            for revno in remove_revs:
                # XXX remove_revs can be rather long for pages with many revs and
                # XXX most page revs usually will be already deleted. optimize?
                self._remove_page_rev(request, connection, pagename, revno)
                logging.debug("removed page %r rev %d" % (pagename, revno))

            from MoinMoin.action import AttachFile

            for attachmentname in AttachFile._get_files(request, pagename):
                self._index_attachment(request, connection, pagename, attachmentname, mode)
Example #38
def execute(macro, args):
    request = macro.request
    
    # get number of wanted links        
    try:
        links = max(int(args), 1)
    except StandardError:
        links = 1

    # Get full page unfiltered page list - very fast!
    all_pages = request.rootpage.getPageList(user='', exists=0)

    # Now select random page from the full list, and if it exists and we
    # can read it, save.
    pages = []
    found = 0
    while found < links and all_pages:
        # Take one random page from the list
        pagename = random.choice(all_pages)
        all_pages.remove(pagename)
        
        # Filter out deleted pages or pages the user may not read.
        page = Page(request, pagename)
        if page.exists() and request.user.may.read(pagename):
            pages.append(pagename)
            found += 1
            
    if not pages:
        return ''

    f = macro.formatter

    # return a single page link
    if links == 1:
        name = pages[0]
        return (f.pagelink(1, name, generated=1) +
                f.text(name) +
                f.pagelink(0, name))

    # return a list of page links
    pages.sort()
    result = []
    write = result.append

    write(f.bullet_list(1))
    for name in pages:
        write(f.listitem(1))
        write(f.pagelink(1, name, generated=1))
        write(f.text(name))
        write(f.pagelink(0, name))
        write(f.listitem(0))
    write(f.bullet_list(0))

    result = ''.join(result)
    return result
Example #39
File: AttachFile.py  Project: aahlad/soar
def move_file(request, pagename, new_pagename, attachment, new_attachment):
    _ = request.getText

    newpage = Page(request, new_pagename)
    if newpage.exists(includeDeleted=1) and request.user.may.write(
            new_pagename) and request.user.may.delete(pagename):
        new_attachment_path = os.path.join(
            getAttachDir(request, new_pagename, create=1),
            new_attachment).encode(config.charset)
        attachment_path = os.path.join(getAttachDir(request, pagename),
                                       attachment).encode(config.charset)

        if os.path.exists(new_attachment_path):
            upload_form(
                pagename,
                request,
                msg=
                _("Attachment '%(new_pagename)s/%(new_filename)s' already exists."
                  ) % {
                      'new_pagename': new_pagename,
                      'new_filename': new_attachment
                  })
            return

        if new_attachment_path != attachment_path:
            filesize = os.path.getsize(attachment_path)
            filesys.rename(attachment_path, new_attachment_path)
            _addLogEntry(request, 'ATTDEL', pagename, attachment)
            event = FileRemovedEvent(request, pagename, attachment, filesize)
            send_event(event)
            _addLogEntry(request, 'ATTNEW', new_pagename, new_attachment)
            event = FileAttachedEvent(request, new_pagename, new_attachment,
                                      filesize)
            send_event(event)
            upload_form(
                pagename,
                request,
                msg=
                _("Attachment '%(pagename)s/%(filename)s' moved to '%(new_pagename)s/%(new_filename)s'."
                  ) % {
                      'pagename': pagename,
                      'filename': attachment,
                      'new_pagename': new_pagename,
                      'new_filename': new_attachment
                  })
        else:
            upload_form(pagename, request, msg=_("Nothing changed"))
    else:
        upload_form(
            pagename,
            request,
            msg=
            _("Page '%(new_pagename)s' does not exist or you don't have enough rights."
              ) % {'new_pagename': new_pagename})
Example #40
def format_page_edits(macro, line):
    request = macro.request
    _ = request.getText
    pagename = line.pagename
    rev = int(line.rev)
    page = Page(request, pagename)

    html_link = u''
    if not page.exists():
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain='standard'):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, u'(削除) %s' % pagename,
                                         querystr={'action': 'diff'}, rel='nofollow')
        else:
            html_link = pagename
    else:
        html_link = page.link_to(request, pagename, rel='nofollow')

    return u'<li>%s</li>' % html_link
Example #41
    def _get_page_body(self, page, this_page):
        """Return the content of a named page; accepts relative pages"""

        if page.startswith("/") or len(page) == 0:
            page = this_page + page

        p = Page(self.request, page)
        if not p.exists():
            raise RuntimeError("Page '%s' not found" % page)
        else:
            return p.get_raw_body().split('\n')
Example #42
    def sidebar(self, d):
        """ Assemble wiki sidebar """
        name = u'サイドバー'
        page = Page(self.request, name)
        if not page.exists():
            return u''

        buff = StringIO()
        self.request.redirect(buff)
        page.send_page(content_only=1)
        self.request.redirect()
        return u'<div id="sidebar">%s</div>' % buff.getvalue()
Example #43
def execute(pagename, request):
    """                                                                         
    Handle action=IntraFarmCopy
    """

    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    # Which local farm wiki - assume team for now                               
    to_wiki_url = COPY_TO_WIKI_URL 
    # New page name                                               
    to_wiki_pagename = '%s%s' % (COPY_TO_PREFIX, pagename)

    # create page at local wiki if it doesn't exist                             
    to_request = ScriptContext(to_wiki_url)
    # login this user remotely
    to_uid = user.getUserId(to_request, request.user.name)
    to_user = user.User(to_request, id=to_uid, name=request.user.name, password=request.user.password, auth_method="moin")
    to_request.user = to_user

    try:
        page = Page(to_request, to_wiki_pagename)
    except:
        return action_error(u'Error accessing destination page')
    if not page.exists():
        pe = PageEditor(to_request, to_wiki_pagename)
        # make initial revision pointer to original                             
        try:
            pe.saveText(u'[[%s:%s]]' % (request.cfg.interwikiname, pagename), 0, comment="Automated IntraFarmCopy pointing to original page")
        except pe.Unchanged:
            return action_error(u'Could not save initial page')
	except pe.AccessDenied:
            return action_error(u'Could not acquire credentials')
        # make next revision content of this page
        try:
            pe.saveText(Page(request, pagename).get_raw_body(), 0, comment="Automated IntraFarmCopy importing contents from original page at [[%s:%s]]" % (request.cfg.interwikiname, pagename))
        except pe.Unchanged:
            return action_error(u'Could not save destination page text')
        # send attachments over
        attachments = AttachFile._get_files(request, pagename)
        for attachname in attachments:
            filename = AttachFile.getFilename(request, pagename, attachname)
            if not os.path.isfile(filename):
                continue
            to_filename = AttachFile.getFilename(to_request, to_wiki_pagename, attachname)
            shutil.copyfile(filename, to_filename)
            AttachFile._addLogEntry(to_request, 'ATTNEW', to_wiki_pagename, attachname)
        # redirect user to view new page in other wiki
        request.http_redirect('%s%s' % (to_wiki_url, to_wiki_pagename))
        return
    else:
        return action_error(u'Destination page already exists!')
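Stripped of the farm-specific setup, the core copy step in this action is reading the source body and saving it through a PageEditor bound to the destination context. A minimal sketch under those assumptions (the function name is ours; it relies only on Page.get_raw_body and PageEditor.saveText as used above):

from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor

def copy_page_body(src_request, dst_request, src_name, dst_name, comment=u"copy"):
    """Copy the raw text of src_name into dst_name in another request context."""
    body = Page(src_request, src_name).get_raw_body()
    editor = PageEditor(dst_request, dst_name)
    try:
        editor.saveText(body, 0, comment=comment)
    except editor.Unchanged:
        pass  # destination already holds identical text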
Example #44
0
    def createSyncPage(self, page_name):
        normalised_name = normalise_pagename(page_name, self.prefix)
        if normalised_name is None:
            return None
        page = Page(self.request, page_name)
        revno = page.get_real_rev()
        if revno == 99999999:  # I love sane in-band signalling
            return None
        return SyncPage(normalised_name,
                        local_rev=revno,
                        local_name=page_name,
                        local_deleted=not page.exists())
Example #45
0
	def prepend_to_wiki_page(self, page_uri, heading, content):
		old_content = ''
		wiki_page = Page(self.request, page_uri)
		#if exists, get old page content
		if(wiki_page.exists()):
			old_content = wiki_page.get_raw_body()
		pagecontent = """\
== %(heading)s ==
%(content)s

%(old_content)s
""" % {'heading': heading, 'content': content, 'old_content': old_content }
		return(PageEditor(self.request, page_uri).saveText(pagecontent, 0))
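A hedged usage sketch for the method above: helper stands for an instance of the surrounding class with a valid request, and the page name and text are made up. It catches the Unchanged exception that saveText raises when the page body would not change:

from MoinMoin.PageEditor import PageEditor

try:
    helper.prepend_to_wiki_page(u'ProjectNews', u'2011-05-01', u'New build uploaded.')
except PageEditor.Unchanged:
    pass  # nothing new to record, page body unchanged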
Example #46
0
def rollback_local_change(): # YYY direct local access
    comment = u"Wikisync rollback"
    rev = new_local_rev - 1
    revstr = '%08d' % rev
    oldpg = Page(self.request, sp.local_name, rev=rev)
    pg = PageEditor(self.request, sp.local_name)
    if not oldpg.exists():
        pg.deletePage(comment)
    else:
        try:
            savemsg = pg.saveText(oldpg.get_raw_body(), 0, comment=comment, extra=revstr, action="SAVE/REVERT")
        except PageEditor.Unchanged:
            pass
    return sp.local_name
Example #47
0
File: info.py  Project: steveyen/moingo
    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1),
                      f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1),
                      f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        from MoinMoin.support.python_compatibility import hash_new
        digest = hash_new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write(f.paragraph(1),
                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                          'label': _("SHA digest of this page's content is:"),
                          'value': digest, }),
                      f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request, include_self=1, return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))
Example #48
0
    def _load_template(self, variable, default):
        if not variable and default:
            name = default
        else:
            name = getattr(self.request.cfg, variable, default)

        if not self.request.user.may.read(name):
            raise InviteException("You are not allowed to read template page '%s'." % name)

        page = Page(self.request, name)
        if not page.exists():
            raise InviteException("Template page '%s' does not exist." % name)

        return page.get_raw_body()
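The permission-then-existence check in _load_template recurs throughout these examples; as a rough sketch it can be factored into a small helper (the name is ours, assuming the usual MoinMoin request object):

from MoinMoin.Page import Page

def readable_page_body(request, name):
    """Return the raw body of name, or None if it is missing or unreadable."""
    if not request.user.may.read(name):
        return None
    page = Page(request, name)
    if not page.exists():
        return None
    return page.get_raw_body()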
Example #49
0
def rollback_local_change(): # YYY direct local access
    comment = u"Wikisync rollback"
    rev = new_local_rev - 1
    revstr = '%08d' % rev
    oldpg = Page(self.request, sp.local_name, rev=rev)
    pg = PageEditor(self.request, sp.local_name)
    if not oldpg.exists():
        pg.deletePage(comment)
    else:
        try:
            savemsg = pg.saveText(oldpg.get_raw_body(), 0, comment=comment, extra=revstr, action="SAVE/REVERT")
        except PageEditor.Unchanged:
            pass
    return sp.local_name
Example #50
0
File: RandomPage.py  Project: aahlad/soar
def macro_RandomPage(macro, links=1):
    request = macro.request
    links = max(links, 1)  # at least 1 link

    # Get full page unfiltered page list - very fast!
    all_pages = request.rootpage.getPageList(user='', exists=0)

    # Now select random page from the full list, and if it exists and we
    # can read it, save.
    pages = []
    found = 0
    while found < links and all_pages:
        # Take one random page from the list
        pagename = random.choice(all_pages)
        all_pages.remove(pagename)

        # Filter out deleted pages or pages the user may not read.
        page = Page(request, pagename)
        if page.exists() and request.user.may.read(pagename):
            pages.append(pagename)
            found += 1

    if not pages:
        return ''

    f = macro.formatter

    # return a single page link
    if links == 1:
        name = pages[0]
        return (f.pagelink(1, name, generated=1) + f.text(name) +
                f.pagelink(0, name))

    # return a list of page links
    pages.sort()
    result = []
    write = result.append

    write(f.bullet_list(1))
    for name in pages:
        write(f.listitem(1))
        write(f.pagelink(1, name, generated=1))
        write(f.text(name))
        write(f.pagelink(0, name))
        write(f.listitem(0))
    write(f.bullet_list(0))

    result = ''.join(result)
    return result
Example #51
0
    def sidebar(self, d, **keywords):
        """ Display page called SideBar as an additional element on every page
        content_id has been changed from the original

        @param d: parameter dictionary
        @rtype: string
        @return: sidebar html
        """

        # Check which page to display, return nothing if doesn't exist.
        sidebar = self.request.getPragma('sidebar', u'SideBar')
        page = Page(self.request, sidebar)
        if not page.exists():
            return u""
        # Capture the page's generated HTML in a buffer.
        buffer = StringIO.StringIO()
        self.request.redirect(buffer)
        try:
            page.send_page(content_only=1, content_id="sidebar-content")
        finally:
            self.request.redirect()
        return u'<div class="sidebar clearfix">%s</div>' % buffer.getvalue()
Example #52
0
    def testSaveAbort(self):
        """Test if saveText() is interrupted if PagePreSave event handler returns Abort"""
        def handler(event):
            from MoinMoin.events import Abort
            return Abort("This is just a test")

        pagename = u'AutoCreatedMoinMoinTemporaryTestPageFortestSave'
        testtext = u'ThisIsSomeStupidTestPageText!'

        self.request.cfg.event_handlers = [handler]

        page = Page(self.request, pagename)
        if page.exists():
            deleter = PageEditor(self.request, pagename)
            deleter.deletePage()

        editor = PageEditor(self.request, pagename)
        editor.saveText(testtext, 0)

        print "PageEditor can't save a page if Abort is returned from PreSave event handlers"
        page = Page(self.request, pagename)
        assert page.body != testtext
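For comparison with the Abort handler in the test above, here is a hedged sketch of a PagePreSave handler that blocks only edits containing a marker string. It assumes the MoinMoin 1.9 events API in which PagePreSaveEvent carries the new page text; the marker and handler name are hypothetical:

from MoinMoin.events import Abort, PagePreSaveEvent

def block_marked_saves(event):
    # registered the same way as in the test: request.cfg.event_handlers = [block_marked_saves]
    if isinstance(event, PagePreSaveEvent):
        if u'DO-NOT-SAVE' in getattr(event, 'new_text', u''):
            return Abort(u"Remove the DO-NOT-SAVE marker before saving.")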
Example #53
0
def execute(macro, args):
    formatter = macro.formatter
    macro.request.page.formatter = formatter
    request = macro.request
    _ = request.getText

    if not cairo_found:
        return formatter.text(_(\
            "ERROR: Cairo Python extensions not installed. " +\
            "Not performing layout.")) + formatter.linebreak()

    urlargs, macro_args = radarchart_args(args)

    pagename = request.page.page_name
    # The first page mentioned will be the page of the chart
    for arg in macro_args.split(','):
        page = Page(request, arg)
        if page.exists():
            pagename = arg
            break

    return u'<div class="metaradarchart">' + \
           u'<img src="%s">' % url_construct(request, urlargs, pagename) + \
           u'</div>'
Example #54
0
def execute(macro, text):
    request = macro.request
    formatter = macro.formatter
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(
        time.time())
    thispage = formatter.page.page_name
    # does the url have calendar params (= somebody has clicked on prev/next links in calendar) ?
    if 'calparms' in macro.request.args:
        has_calparms = 1  # yes!
        text2 = macro.request.args['calparms']
        cparmpagename, cparmyear, cparmmonth, cparmoffset, cparmoffset2, cparmheight6, cparmanniversary, cparmtemplate = \
            parseargs(request, text2, thispage, currentyear, currentmonth, 0, 0, False, False, u'')
        # Note: cparmheight6 and cparmanniversary are not used, they are just there
        # to have a consistent parameter string in calparms and macro args
    else:
        has_calparms = 0

    if text is None:  # macro call without parameters
        text = u''

    # parse and check arguments
    parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, anniversary, parmtemplate = \
        parseargs(request, text, thispage, currentyear, currentmonth, 0, 0, False, False, u'')

    # does url have calendar params and is THIS the right calendar to modify (we can have multiple
    # calendars on the same page)?
    #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):

    # move all calendars when using the navigation:
    if has_calparms and cparmpagename == parmpagename:
        year, month = yearmonthplusoffset(parmyear, parmmonth,
                                          parmoffset + cparmoffset2)
        parmoffset2 = cparmoffset2
        parmtemplate = cparmtemplate
    else:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)

    if request.isSpiderAgent and abs(currentyear - year) > 1:
        return ''  # this is a bot and it didn't follow the rules (see below)
    if currentyear == year:
        attrs = {}
    else:
        attrs = {
            'rel': 'nofollow'
        }  # otherwise even well-behaved bots will index forever

    # get the calendar
    monthcal = calendar.monthcalendar(year, month)

    # european / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    if calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    qpagenames = '*'.join(
        [wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth,
                                                    parmoffset)
    prevlink = p.url(request,
                     querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request,
                     querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request,
                      querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request,
                      querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', **
                              attrs) + '&lt;' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', **
                              attrs) + '&gt;' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', **
                             attrs) + '&lt;&lt;' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', **
                             attrs) + '&gt;&gt;' + formatter.url(0)

    if parmpagename != [thispage]:
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)
        if steps > maxsteps:
            steps = maxsteps
        chstep = int(l / steps)
        st = 0
        while st < l:
            ch = parmpagename[0][st:st + chstep]
            r, g, b = cliprgb(r, g, b)
            link = Page(request, parmpagename[0]).link_to(
                request,
                ch,
                rel='nofollow',
                style=
                'background-color:#%02x%02x%02x;color:#000000;text-decoration:none'
                % (r, g, b))
            pagelinks = pagelinks + link
            r, g, b = (r, g + colorstep, b)
            st = st + chstep
        r, g, b = (255 - colorstep, 255, 255 - colorstep)
        for page in parmpagename[1:]:
            link = Page(request, page).link_to(
                request,
                page,
                rel='nofollow',
                style=
                'background-color:#%02x%02x%02x;color:#000000;text-decoration:none'
                % (r, g, b))
            pagelinks = pagelinks + '*' + link
        showpagename = '   %s<BR>\n' % pagelinks
    else:
        showpagename = ''
    if calendar.firstweekday() == calendar.SUNDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, months[month-1], str(year), nextmonth, nextyear)
    if calendar.firstweekday() == calendar.MONDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;/&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, str(year), month, nextmonth, nextyear)
    restr1 = ' <tr>\n%s </tr>\n' % resth1

    r7 = range(7)
    restd2 = []
    for wkday in r7:
        wday = _(wkdays[wkday])
        if wkday in wkend:
            cssday = "cal-weekend"
        else:
            cssday = "cal-workday"
        restd2.append('  <td class="%s">%s</td>\n' % (cssday, wday))
    restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)

    if parmheight6:
        while len(monthcal) < 6:
            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]

    maketip_js = []
    restrn = []
    for week in monthcal:
        restdn = []
        for wkday in r7:
            day = week[wkday]
            if not day:
                restdn.append('  <td class="cal-invalidday">&nbsp;</td>\n')
            else:
                page = parmpagename[0]
                if anniversary:
                    link = "%s/%02d-%02d" % (page, month, day)
                else:
                    link = "%s/%4d-%02d-%02d" % (page, year, month, day)
                daypage = Page(request, link)
                if daypage.exists() and request.user.may.read(link):
                    csslink = "cal-usedday"
                    query = {}
                    r, g, b, u = (255, 0, 0, 1)
                    daycontent = daypage.get_raw_body()
                    header1_re = re.compile(r'^\s*=\s(.*)\s=$',
                                            re.MULTILINE)  # re.UNICODE
                    titletext = []
                    for match in header1_re.finditer(daycontent):
                        if match:
                            title = match.group(1)
                            title = wikiutil.escape(title).replace("'", "\\'")
                            titletext.append(title)
                    tipname_unescaped = link.replace("'", "\\'")
                    link = wikiutil.escape(link).replace("'", "\\'")
                    tipname = link
                    tiptitle = link
                    tiptext = '<br>'.join(titletext)
                    maketip_js.append("maketip('%s','%s','%s');" %
                                      (tipname, tiptitle, tiptext))
                    attrs = {
                        'onMouseOver': "tip('%s')" % tipname_unescaped,
                        'onMouseOut': "untip()"
                    }
                else:
                    csslink = "cal-emptyday"
                    if parmtemplate:
                        query = {'action': 'edit', 'template': parmtemplate}
                    else:
                        query = {}
                    r, g, b, u = (255, 255, 255, 0)
                    if wkday in wkend:
                        csslink = "cal-weekend"
                    attrs = {'rel': 'nofollow'}
                for otherpage in parmpagename[1:]:
                    otherlink = "%s/%4d-%02d-%02d" % (otherpage, year, month,
                                                      day)
                    otherdaypage = Page(request, otherlink)
                    if otherdaypage.exists():
                        csslink = "cal-usedday"
                        if u == 0:
                            r, g, b = (r - colorstep, g, b - colorstep)
                        else:
                            r, g, b = (r, g + colorstep, b)
                r, g, b = cliprgb(r, g, b)
                style = 'background-color:#%02x%02x%02x' % (r, g, b)
                fmtlink = formatter.url(1, daypage.url(request,
                                                       query), csslink, **
                                        attrs) + str(day) + formatter.url(0)
                if day == currentday and month == currentmonth and year == currentyear:
                    cssday = "cal-today"
                    fmtlink = "<b>%s</b>" % fmtlink  # for browser with CSS probs
                else:
                    cssday = "cal-nottoday"
                restdn.append('  <td style="%s" class="%s">%s</td>\n' %
                              (style, cssday, fmtlink))
        restrn.append(' <tr>\n%s </tr>\n' % "".join(restdn))

    restable = '<table border="2" cellspacing="2" cellpadding="2">\n<col width="14%%" span="7">%s%s%s</table>\n'
    restable = restable % (restr1, restr2, "".join(restrn))

    if maketip_js:
        tip_js = '''<script language="JavaScript" type="text/javascript">
<!--
%s
// -->
</script>
''' % '\n'.join(maketip_js)
    else:
        tip_js = ''

    result = """\
<script type="text/javascript" src="%s/common/js/infobox.js"></script>
<div id="%s" style="position:absolute; visibility:hidden; z-index:20; top:-999em; left:0px;"></div>
%s%s
""" % (request.cfg.url_prefix_static, formatter.make_id_unique('infodiv'),
       tip_js, restable)
    return formatter.rawHTML(result)
Example #55
0
    def visit_reference(self, node):
        """
            Pass links to MoinMoin to get the correct wiki space url. Extract
            the url and pass it on to the html4css1 writer to handle. Inline
            images are also handled by visit_image. Not sure what the "drawing:"
            link scheme is used for, so for now it is handled here.

            Also included here is a hack to allow MoinMoin macros. This routine
            checks for a link which starts with "<<". This link is passed to the
            MoinMoin formatter and the resulting markup is inserted into the
            document in the place of the original link reference.
        """
        if 'refuri' in node.attributes:
            refuri = node['refuri']
            prefix = ''
            link = refuri
            if ':' in refuri:
                prefix, link = refuri.lstrip().split(':', 1)

            # First see if MoinMoin should handle completely. Exits through add_wiki_markup.
            if refuri.startswith('<<') and refuri.endswith('>>'):  # moin macro
                self.process_wiki_text(refuri)
                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                self.add_wiki_markup()

            if prefix == 'drawing':
                self.process_wiki_text("[[%s]]" % refuri)
                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                self.add_wiki_markup()

            # From here down, all links are handled by docutils (except
            # missing attachments), just fixup node['refuri'].
            if prefix == 'attachment':
                if not AttachFile.exists(self.request,
                                         self.request.page.page_name, link):
                    # Attachment doesn't exist, give to MoinMoin to insert upload text.
                    self.process_wiki_text("[[%s]]" % refuri)
                    self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                    self.add_wiki_markup()
                # Attachment exists, just get a link to it.
                node['refuri'] = AttachFile.getAttachUrl(
                    self.request.page.page_name, link, self.request)
                if not [
                        i for i in node.children
                        if i.__class__ == docutils.nodes.image
                ]:
                    node['classes'].append(prefix)
            elif prefix == 'wiki':
                wiki_name, page_name = wikiutil.split_interwiki(link)
                wikitag, wikiurl, wikitail, err = wikiutil.resolve_interwiki(
                    self.request, wiki_name, page_name)
                wikiurl = wikiutil.mapURL(self.request, wikiurl)
                node['refuri'] = wikiutil.join_wiki(wikiurl, wikitail)
                # Only add additional class information if the reference does
                # not have a child image (don't want to add additional markup
                # for images with targets).
                if not [
                        i for i in node.children
                        if i.__class__ == docutils.nodes.image
                ]:
                    node['classes'].append('interwiki')
            elif prefix == 'javascript':
                # is someone trying to do XSS with javascript?
                node['refuri'] = 'javascript:alert("it does not work")'
            elif prefix != '':
                # Some link scheme (http, file, https, mailto, etc.), add class
                # information if the reference doesn't have a child image (don't
                # want additional markup for images with targets).
                # Don't touch the refuri.
                if not [
                        i for i in node.children
                        if i.__class__ == docutils.nodes.image
                ]:
                    node['classes'].append(prefix)
            else:
                # Default case - make a link to a wiki page.
                pagename, anchor = wikiutil.split_anchor(refuri)
                page = Page(
                    self.request,
                    wikiutil.AbsPageName(self.formatter.page.page_name,
                                         pagename))
                node['refuri'] = page.url(self.request, anchor=anchor)
                if not page.exists():
                    node['classes'].append('nonexistent')
        html4css1.HTMLTranslator.visit_reference(self, node)
Example #56
0
def execute(pagename, request):
    form = values_to_form(request.values)

    util = form.get('util', [None])[0]

    if util == "format":
        txt = form.get('text', [None])[0]
        request.write(format_wikitext(request, txt))

    elif util == "getTemplate":
        template = form.get('name', [None])[0]
        template_page = wikiutil.unquoteWikiname(template)
        if request.user.may.read(template_page):
            Page(request, template_page).send_raw()

    elif util == "newPage":
        page = form.get('page', [None])[0]
        content = form.get('content', [""])[0]
        request.content_type = "application/json"

        if request.environ['REQUEST_METHOD'] != 'POST':
            return

        if not page:
            msg = "Page name not defined!"
            json.dump(dict(status="error", msg=msg), request)
            return

        if not request.user.may.write(page):
            msg = "You are not allowed to edit this page"
            json.dump(dict(status="error", msg=msg), request)
            return

        p = Page(request, page)
        if p.exists():
            msg = "Page already exists"
            json.dump(dict(status="error", msg=msg), request)
            return

        editor = PageEditor(request, page)
        msg = editor.saveText(content, p.get_real_rev())
        json.dump(dict(status="ok", msg=msg), request)

    elif util == "getProperties":
        request.content_type = "application/json"
        key = form.get('key', [''])[0]
        json.dump(get_properties(request, key), request)
        return

    elif util == "uploadFile":
        request.content_type = "application/json"

        if not request.user.may.write(pagename):
            msg = u"You are not allowed to edit this page!"
            json.dump(dict(status="error", msg=msg), request)
            request.status_code = 403
            return

        from MoinMoin.action.AttachFile import add_attachment, AttachmentAlreadyExists

        try:
            overwrite = int(form.get('overwrite', ['0'])[0])
        except:
            overwrite = 0

        response = dict(success=list(), failed=list())
        for name in request.files:
            _file = request.files.get(name)
            filename = _file.filename
            try:
                t, s = add_attachment(request,
                                      pagename,
                                      filename,
                                      _file.stream,
                                      overwrite=overwrite)
                response['success'].append(filename)
            except AttachmentAlreadyExists:
                response['failed'].append(filename)

        json.dump(response, request)
        return

    elif util == "getAttachments":
        request.content_type = "application/json"
        from MoinMoin.action.AttachFile import _get_files, getAttachUrl

        files = _get_files(request, pagename)
        response = []
        for name in files:
            response.append(
                dict(url=getAttachUrl(pagename, name, request), name=name))

        json.dump(response, request)

    return
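The JSON error replies in this handler repeat the same two lines; a small sketch of a shared helper (the name is ours; json.dump writes directly to the request object, which this action already treats as a writable stream):

import json

def json_reply(request, status, msg):
    """Write a {status, msg} JSON object as the action response."""
    request.content_type = "application/json"
    json.dump(dict(status=status, msg=msg), request)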
Example #57
0
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show groups that are defined in wikipages linking to it
        # after show groups from other backends.
        grouppage_links = ', '.join([Page(request, group_name).link_to(request) for group_name in wiki_groups] +
                                    other_groups)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text=_('Disable user')
        if account.disabled:
            text=_('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(unicode(ret))

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link = unicode(unicode(ret))

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data, sort_columns=[0])
        return browser.render()

    # No data
    return ''