Code example #1
def valid_url(prop, value, report, generate_https_urls):
    """Validate a URL in the stylesheet.

    The only valid URLs for use in a stylesheet are the custom image format
    (%%example%%) which this function will translate to actual URLs.

    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    m = custom_img_urls.match(url)
    if m:
        name = m.group(1)

        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

        if name in images:
            if not generate_https_urls:
                url = images[name]
            else:
                url = g.media_provider.convert_to_https(images[name])
            value._setCssText("url(%s)" % url)
        else:
            # unknown image label -> error
            report.append(
                ValidationError(
                    msgs['broken_url'] % dict(brokenurl=value.cssText), value))
    else:
        report.append(ValidationError(msgs["custom_images_only"], value))
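
For reference, `custom_img_urls` is a module-level regex that extracts the label from the `%%name%%` placeholder. A minimal sketch of what it could look like (hypothetical; the exact pattern in cssfilter.py may differ):

import re

# Hypothetical pattern: captures the label inside a %%name%% placeholder.
custom_img_urls = re.compile(r"\A%%([a-zA-Z0-9\-]+)%%\Z")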
Code example #2
File: cssfilter.py  Project: tolgaek/reddit
def valid_url(prop, value, report, generate_https_urls):
    """Validate a URL in the stylesheet.

    The only valid URLs for use in a stylesheet are the custom image format
    (%%example%%) which this function will translate to actual URLs.

    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    m = custom_img_urls.match(url)
    if m:
        name = m.group(1)

        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

        if name in images:
            if not generate_https_urls:
                url = images[name]
            else:
                url = g.media_provider.convert_to_https(images[name])
            value._setCssText("url(%s)" % url)
        else:
            # unknown image label -> error
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    else:
        report.append(ValidationError(msgs["custom_images_only"], value))
Code example #3
File: jsontemplates.py  Project: Dakta/reddit
def images(self):
    sr_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    images = []
    for name, url in sr_images.iteritems():
        images.append({'name': name,
                       'link': 'url(%%%%%s%%%%)' % name,
                       'url': url})
    return images
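
This method serializes the subreddit's uploaded stylesheet images for JSON templates. Note that `'url(%%%%%s%%%%)' % name` renders as `url(%%name%%)`, since `%%` escapes to a literal `%` in %-formatting. With two uploaded images the return value would look roughly like this (illustrative names and URLs only):

[{'name': 'header', 'link': 'url(%%header%%)', 'url': 'http://example.com/header.png'},
 {'name': 'icon', 'link': 'url(%%icon%%)', 'url': 'http://example.com/icon.png'}]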
Code example #4
File: jsontemplates.py  Project: rlofblad/reddit
def images(self):
    sr_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    images = []
    for name, url in sr_images.iteritems():
        images.append({'name': name,
                       'link': 'url(%%%%%s%%%%)' % name,
                       'url': url})
    return images
Code example #5
File: cssfilter.py  Project: andre-d/reddit
def valid_url(prop, value, report, generate_https_urls, enforce_custom_images_only):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned.  Sanctioned urls include:
     * anything in /static/
     * image labels %%..%% for images uploaded on /about/stylesheet
     * urls with domains in g.allowed_css_linked_domains
    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise
    # local urls are allowed
    if local_urls.match(url):
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
            return

        t_url = None
        while url != t_url:
            t_url, url = url, filters.url_unescape(url)
        # disallow path trickery
        if "../" in url:
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_img_urls.match(url):
        name = custom_img_urls.match(url).group(1)

        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

        if name in images:
            if not generate_https_urls:
                url = images[name]
            else:
                url = s3_direct_https(images[name])
            value._setCssText("url(%s)" % url)
        else:
            # unknown image label -> error
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    else:
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
            return

        try:
            u = urlparse(url)
            valid_scheme = u.scheme and u.scheme in valid_url_schemes
            valid_domain = u.netloc in g.allowed_css_linked_domains
        except ValueError:
            u = False

        # allowed domains are ok
        if not (u and valid_scheme and valid_domain):
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
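
The unescape loop in the local-URL branch decodes the URL until it reaches a fixed point, so multiply percent-encoded traversal sequences cannot hide from the `"../"` check. A rough illustration of the idea, using `urllib.unquote` as a stand-in for reddit's `filters.url_unescape` (Python 2, as in this codebase):

from urllib import unquote

url = "/static/%252e%252e/secret"  # "../" percent-encoded twice
t_url = None
while url != t_url:
    t_url, url = url, unquote(url)
# url is now "/static/../secret", which the "../" check rejects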
Code example #6
File: filters.py  Project: drewshaver/reddit
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True

    text = snudown.markdown(_force_utf8(text),
                            nofollow,
                            target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')

    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)

    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
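
`img_swap` rewrites `<img>` tags whose `src` is a `%%name%%` placeholder to the stored image URL and strips every other image. A minimal standalone rendition of that logic, with a hypothetical `custom_img_url` pattern and sample data (BeautifulSoup 3, per this codebase):

import re
from BeautifulSoup import BeautifulSoup  # BeautifulSoup 3 import style

custom_img_url = re.compile(r"\A%%([a-zA-Z0-9\-]+)%%\Z")   # hypothetical pattern
page_images = {'header': 'http://example.com/header.png'}  # hypothetical data

soup = BeautifulSoup('<img src="%%header%%" /><img src="http://evil.example/x.png" />')
for tag in soup.findAll('img'):
    m = custom_img_url.search(tag.get('src'))
    name = m and m.group(1)
    if name and name in page_images:
        tag['src'] = page_images[name]  # wikimarkdown also makes this protocol-relative
    else:
        tag.extract()
# str(soup) now contains only the rewritten %%header%% image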
Code example #7
def valid_url(prop, value, report):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned.  Sanctioned urls include:
     * anything in /static/
     * image labels %%..%% for images uploaded on /about/stylesheet
     * urls with domains in g.allowed_css_linked_domains
    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise
    # local urls are allowed
    if local_urls.match(url):
        t_url = None
        while url != t_url:
            t_url, url = url, filters.url_unescape(url)
        # disallow path trickery
        if "../" in url:
            report.append(
                ValidationError(
                    msgs['broken_url'] % dict(brokenurl=value.cssText), value))
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_img_urls.match(url):
        name = custom_img_urls.match(url).group(1)

        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

        if name in images:
            url = s3_https_if_secure(images[name])
            value._setCssText("url(%s)" % url)
        else:
            # unknown image label -> error
            report.append(
                ValidationError(
                    msgs['broken_url'] % dict(brokenurl=value.cssText), value))
    else:
        try:
            u = urlparse(url)
            valid_scheme = u.scheme and u.scheme in valid_url_schemes
            valid_domain = u.netloc in g.allowed_css_linked_domains
        except ValueError:
            u = False

        # allowed domains are ok
        if not (u and valid_scheme and valid_domain):
            report.append(
                ValidationError(
                    msgs['broken_url'] % dict(brokenurl=value.cssText), value))
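
The final branch only admits absolute URLs whose scheme and domain are whitelisted. If `urlparse` raises `ValueError`, `u` is set to `False` and the short-circuiting `and` keeps the unbound `valid_scheme`/`valid_domain` names from ever being evaluated. A sketch of the check with stand-in values (hypothetical; the real whitelists come from configuration):

from urlparse import urlparse  # Python 2 stdlib, as in this codebase

valid_url_schemes = ('http', 'https')            # hypothetical values
allowed_domains = set(['a.thumbs.example.com'])  # stands in for g.allowed_css_linked_domains

u = urlparse('https://a.thumbs.example.com/bg.png')
assert u.scheme in valid_url_schemes and u.netloc in allowed_domains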
Code example #8
File: filters.py  Project: pra85/reddit
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Code example #9
File: filters.py  Project: wigg234/reddit
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import s3_https_if_secure

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = s3_https_if_secure(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True

    text = snudown.markdown(_force_utf8(text),
                            nofollow,
                            target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')

    if images:
        [img_swap(image) for image in images]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)

    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Code example #10
File: filters.py  Project: Acceto/reddit
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import media_https_if_secure

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = media_https_if_secure(url)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]
    
    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Code example #11
File: jsontemplates.py  Project: JingyanZ/reddit
def images(self):
    sr_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    images = []
    for name, url in sr_images.iteritems():
        images.append({"name": name, "link": "url(%%%%%s%%%%)" % name, "url": url})
    return images
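
All eleven examples funnel through the same lookup. A minimal usage sketch, assuming a request context `c` with `c.site` bound to the current subreddit (as in the examples above):

from r2.models.wiki import ImagesByWikiPage

# Returns a dict mapping image label to stored URL for the subreddit's
# stylesheet wiki page, e.g. {'header': 'http://.../header.png'}.
images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")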