Esempio n. 1
0
 def test_cache_key(self):
     """Different <kw> must yield different cache keys."""
     key_a = crypto.cache_key(MoinMoin='value1')
     key_b = crypto.cache_key(Moin2='value2')
     assert key_a != key_b, "Expected different keys for different <kw> but got the same"
Esempio n. 2
0
 def test_cache_key(self):
     """Keys generated from distinct keyword sets must differ."""
     kw_sets = ({'MoinMoin': 'value1'}, {'Moin2': 'value2'})
     first, second = [crypto.cache_key(**kw) for kw in kw_sets]
     assert first != second, "Expected different keys for different <kw> but got the same"
Esempio n. 3
0
    def put_member(self, name, content, content_length, expected_members):
        """
        puts a new member file into a temporary tar container.
        If all expected members have been put, it saves the tar container
        to a new item revision.

        :param name: name of the data in the container file
        :param content: the data to store into the tar file (str or file-like)
        :param content_length: byte-length of content (for str, None can be given)
        :param expected_members: set of expected member file names
        :raises StorageError: if name is not an expected member, if content is
                              neither str nor file-like, or if the container
                              holds members outside expected_members
        """
        # Reject anything the caller did not announce via expected_members.
        if name not in expected_members:
            raise StorageError(
                "tried to add unexpected member {0!r} to container item {1!r}".
                format(name, self.name))
        # tarfile (Python 2) expects byte-string member names.
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        # Deterministic temp file name, so successive put_member() calls for
        # the same item keep appending to the same container file.
        temp_fname = os.path.join(
            tempfile.gettempdir(),
            'TarContainer_' + cache_key(usage='TarContainer', name=self.name))
        tf = tarfile.TarFile(temp_fname, mode='a')  # append: keep earlier members
        ti = tarfile.TarInfo(name)
        if isinstance(content, str):
            if content_length is None:
                content_length = len(content)
            content = StringIO(content)  # we need a file obj
        elif not hasattr(content, 'read'):
            logging.error("unsupported content object: {0!r}".format(content))
            raise StorageError(
                "unsupported content object: {0!r}".format(content))
        assert content_length >= 0  # we don't want -1 interpreted as 4G-1
        ti.size = content_length
        tf.addfile(ti, content)
        tf_members = set(tf.getnames())
        tf.close()
        # A stray member means the temp container is corrupt; discard it.
        if tf_members - expected_members:
            msg = "found unexpected members in container item {0!r}".format(
                self.name)
            logging.error(msg)
            os.remove(temp_fname)
            raise StorageError(msg)
        if tf_members == expected_members:
            # everything we expected has been added to the tar file, save the container as revision
            meta = {CONTENTTYPE: self.contenttype}
            data = open(temp_fname, 'rb')
            self.item._save(meta,
                            data,
                            name=self.name,
                            action=ACTION_SAVE,
                            comment='')
            data.close()
            os.remove(temp_fname)
Esempio n. 4
0
    def navibar(self, item_name):
        """
        Assemble the navibar

        :param item_name: name of the item currently shown (used to decide
                          whether a sister wiki carries the same page)
        :rtype: list
        :returns: list of tuples (css_class, url, link_text, title)
        """
        flaskg.clock.start('navibar')
        current = item_name
        # Process config navi_bar
        items = [(cls, url_for(endpoint, **args), link_text, title)
                 for cls, endpoint, args, link_text, title in self.cfg.navi_bar]

        # Add user links to wiki links.
        for text in self.user.quicklinks:
            url, link_text, title = self.split_navilink(text)
            items.append(('userlink', url, link_text, title))

        # Add sister pages (see http://usemod.com/cgi-bin/mb.pl?SisterSitesImplementationGuide )
        for sistername, sisterurl in self.cfg.sistersites:
            if is_local_wiki(sistername):
                # bug fix: was a 3-tuple, breaking the documented
                # (css_class, url, link_text, title) contract -- add empty title
                items.append(('sisterwiki current', sisterurl, sistername, ''))
            else:
                cid = cache_key(usage="SisterSites", sistername=sistername)
                sisteritems = app.cache.get(cid)
                if sisteritems is None:
                    uo = urllib.URLopener()
                    uo.version = 'MoinMoin SisterItem list fetcher 1.0'
                    try:
                        sisteritems = {}
                        f = uo.open(sisterurl)
                        for line in f:
                            line = line.strip()
                            try:
                                item_url, item_name = line.split(' ', 1)
                                sisteritems[item_name.decode('utf-8')] = item_url
                            # was a bare except: narrowed to what these two
                            # lines can raise (unpack error / bad utf-8)
                            except (ValueError, UnicodeError):
                                pass  # ignore invalid lines
                        f.close()
                        app.cache.set(cid, sisteritems)
                        logging.info("Site: {0} Status: Updated. Pages: {1}".format(sistername, len(sisteritems)))
                    except IOError as err:
                        (title, code, msg, headers) = err.args  # code e.g. 304
                        logging.warning("Site: {0} Status: Not updated.".format(sistername))
                        logging.exception("exception was:")
                if current in sisteritems:
                    url = sisteritems[current]
                    items.append(('sisterwiki', url, sistername, ''))
        flaskg.clock.stop('navibar')
        return items
Esempio n. 5
0
 def _do_get_modified(self, hash, force_attachment=False, mimetype=None):
     """Serve item data, applying a cached image transformation on demand.

     Query args 'w' (width), 'h' (height) and 't' (transpose op, 1..8) may
     request a transformation; the result is looked up in / stored into the
     app cache.  Without any of them, the request is delegated to _do_get.

     :param hash: content hash, passed through to _do_get
     :param force_attachment: passed through to _do_get
     :param mimetype: if given, overrides the revision's stored content type
     """
     def parse_int(key, fallback):
         # missing or non-numeric query values fall back silently
         try:
             return int(request.values.get(key))
         except (TypeError, ValueError):
             return fallback

     width = parse_int('w', None)
     height = parse_int('h', None)
     transpose = parse_int('t', 1)
     if not 1 <= transpose <= 8:
         transpose = 1  # same fallback as an out-of-range value
     if not (width or height or transpose != 1):
         return self._do_get(hash,
                             force_attachment=force_attachment,
                             mimetype=mimetype)
     # resize requested, XXX check ACL behaviour! XXX
     hash_name = HASH_ALGORITHM
     cid = cache_key(usage="ImageTransform",
                     hash_name=hash_name,
                     hash_hexdigest=self.rev.meta[hash_name],
                     width=width,
                     height=height,
                     transpose=transpose)
     cached = app.cache.get(cid)
     if cached is not None:
         # XXX TODO check ACL behaviour
         headers, data = cached
     else:
         content_type = mimetype if mimetype else self.rev.meta[CONTENTTYPE]
         # 99999 acts as "unbounded" for a missing dimension
         size = (width or 99999, height or 99999)
         content_type, data = self._transform(content_type,
                                              size=size,
                                              transpose_op=transpose)
         headers = wikiutil.file_headers(content_type=content_type,
                                         content_length=len(data))
         app.cache.set(cid, (headers, data))
     return Response(data, headers=headers)
Esempio n. 6
0
    def internal_representation(self, attributes=None):
        """
        Return the internal representation of a document using a DOM Tree

        The DOM is cached keyed on the revision's content hash, so repeated
        calls for unchanged content skip the conversion entirely.

        :param attributes: optional arguments handed to the input converter
        :raises TypeError: if no converter from this item's contenttype to
                           the DOM tree is registered
        """
        hash_name = HASH_ALGORITHM
        hash_hexdigest = self.rev.meta.get(hash_name)
        if hash_hexdigest:
            cid = cache_key(usage="internal_representation",
                            hash_name=hash_name,
                            hash_hexdigest=hash_hexdigest)
            doc = app.cache.get(cid)
        else:
            # likely a non-existing item
            doc = cid = None
        if doc is None:
            # We will see if we can perform the conversion:
            # FROM_mimetype --> DOM
            # if so we perform the transformation, otherwise we don't
            from MoinMoin.converter import default_registry as reg
            input_conv = reg.get(Type(self.contenttype), type_moin_document)
            if not input_conv:
                raise TypeError(
                    "We cannot handle the conversion from {0} to the DOM tree".
                    format(self.contenttype))
            smiley_conv = reg.get(type_moin_document,
                                  type_moin_document,
                                  icon='smiley')

            # We can process the conversion
            links = Iri(scheme='wiki', authority='', path='/' + self.name)
            doc = input_conv(self.rev, self.contenttype, arguments=attributes)
            # XXX is the following assuming that the top element of the doc tree
            # is a moin_page.page element? if yes, this is the wrong place to do that
            # as not every doc will have that element (e.g. for images, we just get
            # moin_page.object, for a tar item, we get a moin_page.table):
            doc.set(moin_page.page_href, unicode(links))
            # smiley substitution only applies to the wiki-ish markups
            if self.contenttype.startswith((
                    u'text/x.moin.wiki',
                    u'text/x-mediawiki',
                    u'text/x.moin.creole',
            )):
                doc = smiley_conv(doc)
            # only cache when we had a content hash to key on
            if cid:
                app.cache.set(cid, doc)
        return doc
Esempio n. 7
0
    def put_member(self, name, content, content_length, expected_members):
        """
        puts a new member file into a temporary tar container.
        If all expected members have been put, it saves the tar container
        to a new item revision.

        :param name: name of the data in the container file
        :param content: the data to store into the tar file (str or file-like)
        :param content_length: byte-length of content (for str, None can be given)
        :param expected_members: set of expected member file names
        :raises StorageError: for an unexpected member name, an unsupported
                              content object, or stray members in the container
        """
        # idiom fix: `name not in` instead of `not name in`
        if name not in expected_members:
            raise StorageError("tried to add unexpected member {0!r} to container item {1!r}".format(name, self.name))
        if isinstance(name, unicode):
            # tarfile (Python 2) expects byte-string member names
            name = name.encode("utf-8")
        # deterministic temp name, so repeated calls append to the same container
        temp_fname = os.path.join(
            tempfile.gettempdir(), "TarContainer_" + cache_key(usage="TarContainer", name=self.name)
        )
        tf = tarfile.TarFile(temp_fname, mode="a")
        ti = tarfile.TarInfo(name)
        if isinstance(content, str):
            if content_length is None:
                content_length = len(content)
            content = StringIO(content)  # we need a file obj
        elif not hasattr(content, "read"):
            logging.error("unsupported content object: {0!r}".format(content))
            raise StorageError("unsupported content object: {0!r}".format(content))
        assert content_length >= 0  # we don't want -1 interpreted as 4G-1
        ti.size = content_length
        tf.addfile(ti, content)
        tf_members = set(tf.getnames())
        tf.close()
        # a stray member means the temp container is corrupt; discard it
        if tf_members - expected_members:
            msg = "found unexpected members in container item {0!r}".format(self.name)
            logging.error(msg)
            os.remove(temp_fname)
            raise StorageError(msg)
        if tf_members == expected_members:
            # everything we expected has been added to the tar file, save the container as revision
            meta = {CONTENTTYPE: self.contenttype}
            data = open(temp_fname, "rb")
            self.item._save(meta, data, name=self.name, action=ACTION_SAVE, comment="")
            data.close()
            os.remove(temp_fname)
Esempio n. 8
0
    def _render_data_diff_raw(self, oldrev, newrev):
        """Return a raw Response containing a pixel diff of two image revisions.

        The rendered diff (headers + bytes) is cached keyed on both revisions'
        content hashes; a cache hit is served without touching PIL.

        :param oldrev: older revision (meta must contain the content hash)
        :param newrev: newer revision; its contenttype selects the output format
        :raises ValueError: for content types other than JPEG/PNG/GIF
        """
        hash_name = HASH_ALGORITHM
        cid = cache_key(usage="ImageDiff",
                        hash_name=hash_name,
                        hash_old=oldrev.meta[hash_name],
                        hash_new=newrev.meta[hash_name])
        cached = app.cache.get(cid)
        if cached is not None:
            # XXX TODO check ACL behaviour
            headers, data = cached
            return Response(data, headers=headers)

        if PIL is None:
            abort(404)  # TODO render user friendly error image

        content_type = newrev.meta[CONTENTTYPE]
        # dispatch table instead of the if/elif chain
        output_types = {'image/jpeg': 'JPEG', 'image/png': 'PNG', 'image/gif': 'GIF'}
        if content_type not in output_types:
            raise ValueError(
                "content_type {0!r} not supported".format(content_type))
        output_type = output_types[content_type]

        try:
            oldimage = PILImage.open(oldrev.data)
            newimage = PILImage.open(newrev.data)
            oldimage.load()
            newimage.load()
            diffimage = PILdiff(newimage, oldimage)
            outfile = StringIO()
            diffimage.save(outfile, output_type)
            data = outfile.getvalue()
            outfile.close()
            headers = wikiutil.file_headers(content_type=content_type,
                                            content_length=len(data))
            app.cache.set(cid, (headers, data))
        except (IOError, ValueError) as err:
            logging.exception("error during PILdiff: {0}".format(
                err.message))
            abort(404)  # TODO render user friendly error image
        return Response(data, headers=headers)
Esempio n. 9
0
 def _do_get_modified(self, hash, force_attachment=False, mimetype=None):
     """Serve item data, applying a cached image transformation on demand.

     Query args 'w' (width), 'h' (height) and 't' (transpose op, 1..8) may
     request a transformation; the result is looked up in / stored into the
     app cache.  Without any of them, the request is delegated to _do_get.

     :param hash: content hash, passed through to _do_get
     :param force_attachment: passed through to _do_get
     :param mimetype: if given, overrides the revision's stored content type
     """
     try:
         width = int(request.values.get("w"))
     except (TypeError, ValueError):
         width = None  # missing or non-numeric: no width constraint
     try:
         height = int(request.values.get("h"))
     except (TypeError, ValueError):
         height = None  # missing or non-numeric: no height constraint
     try:
         transpose = int(request.values.get("t"))
         assert 1 <= transpose <= 8  # only op codes 1..8 are accepted
     except (TypeError, ValueError, AssertionError):
         transpose = 1  # 1 means "no transposition"
     if width or height or transpose != 1:
         # resize requested, XXX check ACL behaviour! XXX
         hash_name = HASH_ALGORITHM
         hash_hexdigest = self.rev.meta[hash_name]
         # cache key covers the content hash plus all transform parameters
         cid = cache_key(
             usage="ImageTransform",
             hash_name=hash_name,
             hash_hexdigest=hash_hexdigest,
             width=width,
             height=height,
             transpose=transpose,
         )
         c = app.cache.get(cid)
         if c is None:
             if mimetype:
                 content_type = mimetype
             else:
                 content_type = self.rev.meta[CONTENTTYPE]
             # 99999 acts as "unbounded" for a missing dimension
             size = (width or 99999, height or 99999)
             content_type, data = self._transform(content_type, size=size, transpose_op=transpose)
             headers = wikiutil.file_headers(content_type=content_type, content_length=len(data))
             app.cache.set(cid, (headers, data))
         else:
             # XXX TODO check ACL behaviour
             headers, data = c
         return Response(data, headers=headers)
     else:
         return self._do_get(hash, force_attachment=force_attachment, mimetype=mimetype)
Esempio n. 10
0
    def _render_data_diff_raw(self, oldrev, newrev):
        """Return a raw Response containing a pixel diff of two image revisions.

        The rendered diff (headers + bytes) is cached keyed on both revisions'
        content hashes; a cache hit is served without touching PIL.

        :param oldrev: older revision (meta must contain the content hash)
        :param newrev: newer revision; its contenttype selects the output format
        :raises ValueError: for content types other than JPEG/PNG/GIF
        """
        hash_name = HASH_ALGORITHM
        cid = cache_key(usage="ImageDiff",
                        hash_name=hash_name,
                        hash_old=oldrev.meta[hash_name],
                        hash_new=newrev.meta[hash_name])
        c = app.cache.get(cid)
        if c is None:
            if PIL is None:
                abort(404)  # TODO render user friendly error image

            content_type = newrev.meta[CONTENTTYPE]
            if content_type == 'image/jpeg':
                output_type = 'JPEG'
            elif content_type == 'image/png':
                output_type = 'PNG'
            elif content_type == 'image/gif':
                output_type = 'GIF'
            else:
                raise ValueError("content_type {0!r} not supported".format(content_type))

            try:
                oldimage = PILImage.open(oldrev.data)
                newimage = PILImage.open(newrev.data)
                # load() forces full decoding before the source streams go away
                oldimage.load()
                newimage.load()
                diffimage = PILdiff(newimage, oldimage)
                outfile = StringIO()
                diffimage.save(outfile, output_type)
                data = outfile.getvalue()
                outfile.close()
                headers = wikiutil.file_headers(content_type=content_type, content_length=len(data))
                app.cache.set(cid, (headers, data))
            except (IOError, ValueError) as err:
                logging.exception("error during PILdiff: {0}".format(err.message))
                abort(404)  # TODO render user friendly error image
        else:
            # XXX TODO check ACL behaviour
            headers, data = c
        return Response(data, headers=headers)
Esempio n. 11
0
    def internal_representation(self, converters=('smiley',), attributes=None):
        """
        Return the internal representation of a document using a DOM Tree

        The DOM is cached keyed on the revision's content hash, so repeated
        calls for unchanged content skip the conversion entirely.

        :param converters: names of post-conversion steps to apply; only
                           'smiley' is recognized here.  Default changed from
                           the mutable list ['smiley'] to an equivalent tuple
                           to avoid the shared-mutable-default pitfall.
        :param attributes: optional arguments handed to the input converter
        :raises TypeError: if no converter from this item's contenttype to
                           the DOM tree is registered
        """
        hash_name = HASH_ALGORITHM
        hash_hexdigest = self.rev.meta.get(hash_name)
        if hash_hexdigest:
            cid = cache_key(usage="internal_representation",
                            hash_name=hash_name,
                            hash_hexdigest=hash_hexdigest)
            doc = app.cache.get(cid)
        else:
            # likely a non-existing item
            doc = cid = None
        if doc is None:
            # We will see if we can perform the conversion:
            # FROM_mimetype --> DOM
            # if so we perform the transformation, otherwise we don't
            from MoinMoin.converter import default_registry as reg
            input_conv = reg.get(Type(self.contenttype), type_moin_document)
            if not input_conv:
                raise TypeError("We cannot handle the conversion from {0} to the DOM tree".format(self.contenttype))
            smiley_conv = reg.get(type_moin_document, type_moin_document, icon='smiley')

            # We can process the conversion
            links = Iri(scheme='wiki', authority='', path='/' + self.name)
            doc = input_conv(self.rev, self.contenttype, arguments=attributes)
            # XXX is the following assuming that the top element of the doc tree
            # is a moin_page.page element? if yes, this is the wrong place to do that
            # as not every doc will have that element (e.g. for images, we just get
            # moin_page.object, for a tar item, we get a moin_page.table):
            doc.set(moin_page.page_href, unicode(links))
            for conv in converters:
                if conv == 'smiley':
                    doc = smiley_conv(doc)
            # only cache when we had a content hash to key on
            if cid:
                app.cache.set(cid, doc)
        return doc
Esempio n. 12
0
    def navibar(self, fqname):
        """
        Assemble the navibar

        :param fqname: fully qualified name of the current item (a unicode
                       name is converted to a CompositeName first)
        :rtype: list
        :returns: list of tuples (css_class, url, link_text, title)
        """
        if not isinstance(fqname, CompositeName):
            fqname = split_fqname(fqname)
        item_name = fqname.value
        current = item_name
        # Process config navi_bar
        items = []
        for cls, endpoint, args, link_text, title in self.cfg.navi_bar:
            if endpoint == "frontend.show_root":
                # "show root" resolves to the namespace's configured root item
                endpoint = "frontend.show_item"
                root_fqname = fqname.get_root_fqname()
                default_root = app.cfg.root_mapping.get(
                    NAMESPACE_DEFAULT, app.cfg.default_root)
                args['item_name'] = root_fqname.fullname if fqname.namespace != NAMESPACE_ALL else default_root
            elif endpoint in [
                    "frontend.global_history", "frontend.global_tags"
            ]:
                args['namespace'] = fqname.namespace
            elif endpoint == "frontend.index":
                args['item_name'] = fqname.namespace
            items.append((cls, url_for(endpoint, **args), link_text, title))

        # Add user links to wiki links.
        for text in self.user.quicklinks:
            url, link_text, title = self.split_navilink(text)
            items.append(('userlink', url, link_text, title))

        # Add sister pages (see http://usemod.com/cgi-bin/mb.pl?SisterSitesImplementationGuide )
        for sistername, sisterurl in self.cfg.sistersites:
            if is_local_wiki(sistername):
                items.append(('sisterwiki current', sisterurl, sistername, ''))
            else:
                cid = cache_key(usage="SisterSites", sistername=sistername)
                sisteritems = app.cache.get(cid)
                if sisteritems is None:
                    uo = urllib.URLopener()
                    uo.version = 'MoinMoin SisterItem list fetcher 1.0'
                    try:
                        sisteritems = {}
                        f = uo.open(sisterurl)
                        for line in f:
                            line = line.strip()
                            try:
                                item_url, item_name = line.split(' ', 1)
                                sisteritems[item_name.decode(
                                    'utf-8')] = item_url
                            # was a bare except: narrowed to what these two
                            # lines can raise (unpack error / bad utf-8)
                            except (ValueError, UnicodeError):
                                pass  # ignore invalid lines
                        f.close()
                        app.cache.set(cid, sisteritems)
                        logging.info(
                            "Site: {0!r} Status: Updated. Pages: {1}".format(
                                sistername, len(sisteritems)))
                    except IOError as err:
                        (title, code, msg, headers) = err.args  # code e.g. 304
                        logging.warning(
                            "Site: {0!r} Status: Not updated.".format(
                                sistername))
                        logging.exception("exception was:")
                if current in sisteritems:
                    url = sisteritems[current]
                    items.append(('sisterwiki', url, sistername, ''))
        return items
Esempio n. 13
0
    def navibar(self, fqname):
        """
        Assemble the navibar

        :param fqname: fully qualified name of the current item (a unicode
                       name is converted to a CompositeName first)
        :rtype: list
        :returns: list of tuples (css_class, url, link_text, title)
        """
        if not isinstance(fqname, CompositeName):
            fqname = split_fqname(fqname)
        item_name = fqname.value
        current = item_name
        # Process config navi_bar
        items = []
        for cls, endpoint, args, link_text, title in self.cfg.navi_bar:
            if endpoint == "frontend.show_root":
                # "show root" resolves to the namespace's configured root item
                endpoint = "frontend.show_item"
                root_fqname = fqname.get_root_fqname()
                default_root = app.cfg.root_mapping.get(NAMESPACE_DEFAULT, app.cfg.default_root)
                args['item_name'] = root_fqname.fullname if fqname.namespace != NAMESPACE_ALL else default_root
            elif endpoint in ["frontend.global_history", "frontend.global_tags"]:
                args['namespace'] = fqname.namespace
            elif endpoint == "frontend.index":
                args['item_name'] = fqname.namespace
            items.append((cls, url_for(endpoint, **args), link_text, title))

        # Add user links to wiki links.
        for text in self.user.quicklinks:
            url, link_text, title = self.split_navilink(text)
            items.append(('userlink', url, link_text, title))

        # Add sister pages (see http://usemod.com/cgi-bin/mb.pl?SisterSitesImplementationGuide )
        for sistername, sisterurl in self.cfg.sistersites:
            if is_local_wiki(sistername):
                items.append(('sisterwiki current', sisterurl, sistername, ''))
            else:
                cid = cache_key(usage="SisterSites", sistername=sistername)
                sisteritems = app.cache.get(cid)
                if sisteritems is None:
                    uo = urllib.URLopener()
                    uo.version = 'MoinMoin SisterItem list fetcher 1.0'
                    try:
                        sisteritems = {}
                        f = uo.open(sisterurl)
                        for line in f:
                            line = line.strip()
                            try:
                                item_url, item_name = line.split(' ', 1)
                                sisteritems[item_name.decode('utf-8')] = item_url
                            # bug fix: replaced bare except with the errors the
                            # guarded lines actually raise (unpack / bad utf-8)
                            except (ValueError, UnicodeError):
                                pass  # ignore invalid lines
                        f.close()
                        app.cache.set(cid, sisteritems)
                        logging.info("Site: {0!r} Status: Updated. Pages: {1}".format(sistername, len(sisteritems)))
                    except IOError as err:
                        (title, code, msg, headers) = err.args  # code e.g. 304
                        logging.warning("Site: {0!r} Status: Not updated.".format(sistername))
                        logging.exception("exception was:")
                if current in sisteritems:
                    url = sisteritems[current]
                    items.append(('sisterwiki', url, sistername, ''))
        return items
Esempio n. 14
0
def atom(item_name):
    """Return an Atom feed Response for item_name (or the whole wiki if empty).

    The rendered feed is cached keyed on the latest matching revision id, so
    repeated requests without new edits are served straight from the cache.
    """
    # Currently atom feeds behave in the fol. way
    # - Text diffs are shown in a side-by-side fashion
    # - The current binary item is fully rendered in the feed
    # - Image(binary)'s diff is shown using PIL
    # - First item is always rendered fully
    # - Revision meta(id, size and comment) is shown for parent and current revision
    query = Term(WIKINAME, app.cfg.interwikiname)
    if item_name:
        query = And([query, Term(NAME_EXACT, item_name), ])
    revs = list(flaskg.storage.search(query, idx_name=LATEST_REVS, sortedby=[MTIME], reverse=True, limit=1))
    if revs:
        rev = revs[0]
        cid = cache_key(usage="atom", revid=rev.revid, item_name=item_name)
        content = app.cache.get(cid)
    else:
        # no matching revision: nothing to key the cache on, render empty feed
        content = None
        cid = None
    if content is None:
        if not item_name:
            title = u"{0}".format(app.cfg.sitename)
        else:
            title = u"{0} - {1}".format(app.cfg.sitename, item_name)
        feed = AtomFeed(title=title, feed_url=request.url, url=request.host_url)
        query = Term(WIKINAME, app.cfg.interwikiname)
        if item_name:
            query = And([query, Term(NAME_EXACT, item_name), ])
        history = flaskg.storage.search(query, idx_name=ALL_REVS, sortedby=[MTIME], reverse=True, limit=100)
        for rev in history:
            name = rev.name
            item = rev.item
            this_revid = rev.meta[REVID]
            previous_revid = rev.meta.get(PARENTID)
            this_rev = rev
            try:
                hl_item = Item.create(name, rev_id=this_revid)
                if previous_revid is not None:
                    # HTML diff for subsequent revisions
                    previous_rev = item[previous_revid]
                    content = hl_item.content._render_data_diff_atom(previous_rev, this_rev)
                else:
                    # full html rendering for new items
                    content = render_template('atom.html', get='first_revision', rev=this_rev,
                                              content=Markup(hl_item.content._render_data()), revision=this_revid)
                content_type = 'html'
            # fix: dropped the unused `as e` binding; logging.exception
            # already records the active exception and traceback
            except Exception:
                logging.exception("content rendering crashed")
                content = _(u'MoinMoin feels unhappy.')
                content_type = 'text'
            author = get_editor_info(rev.meta, external=True)
            rev_comment = rev.meta.get(COMMENT, '')
            if rev_comment:
                # Trim down extremely long revision comment
                if len(rev_comment) > 80:
                    content = render_template('atom.html', get='comment_cont_merge', comment=rev_comment[79:],
                                              content=Markup(content))
                    rev_comment = u"{0}...".format(rev_comment[:79])
                feed_title = u"{0} - {1}".format(author.get(NAME, ''), rev_comment)
            else:
                feed_title = u"{0}".format(author.get(NAME, ''))
            if not item_name:
                feed_title = u"{0} - {1}".format(name, feed_title)
            feed.add(title=feed_title, title_type='text',
                     summary=content, summary_type=content_type,
                     author=author,
                     url=url_for_item(name, rev=this_revid, _external=True),
                     updated=datetime.fromtimestamp(rev.meta[MTIME]),
            )
        content = feed.to_string()
        # Hack to add XSLT stylesheet declaration since AtomFeed doesn't allow this
        content = content.split("\n")
        content.insert(1, render_template('atom.html', get='xml'))
        content = "\n".join(content)
        if cid is not None:
            app.cache.set(cid, content)
    return Response(content, content_type='application/atom+xml')