Example #1: safemarkdown (affiliate link tagging)
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    from r2.lib.utils import generate_affiliate_link, domain
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)
    to_affiliate = kwargs.get("affiliate", False)
    if to_affiliate:
        soup = BeautifulSoup(text.decode('utf-8'))
        links = soup.findAll('a')
        update_text = False

        def detect_affiliate(markdown_link):
            return domain(markdown_link.get('href'))\
                    in g.merchant_affiliate_domains

        for link in filter(detect_affiliate, links):
            update_text = True
            link['class'] = 'affiliate'
            link['data-href-url'] = link.get('href')
            link['data-affiliate-url'] = generate_affiliate_link(
                link.get('href'))

        if update_text:
            text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
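The example above renders the markdown with snudown and then makes a second pass over the generated HTML with BeautifulSoup: any link whose domain appears in g.merchant_affiliate_domains gets an affiliate class plus data-href-url and data-affiliate-url attributes so the client can swap in the affiliate URL later. Below is a minimal standalone sketch of that tagging pass, written against bs4 rather than the old BeautifulSoup 3 API; MERCHANT_DOMAINS, base_domain, and make_affiliate_url are hypothetical stand-ins for g.merchant_affiliate_domains and the domain and generate_affiliate_link helpers from r2.lib.utils.

from urllib.parse import urlparse

from bs4 import BeautifulSoup

# Hypothetical stand-in for g.merchant_affiliate_domains.
MERCHANT_DOMAINS = {"example-store.com"}

def base_domain(url):
    # Rough equivalent of r2.lib.utils.domain(): hostname without a www prefix.
    host = (urlparse(url).hostname or "").lower()
    return host[4:] if host.startswith("www.") else host

def make_affiliate_url(url):
    # Placeholder for generate_affiliate_link(); the real helper builds a
    # tagged/redirect URL for the merchant.
    return url + "?ref=affiliate"

def tag_affiliate_links(html):
    """Mark merchant links the way safemarkdown(affiliate=True) does."""
    soup = BeautifulSoup(html, "html.parser")
    changed = False
    for link in soup.find_all("a", href=True):
        if base_domain(link["href"]) in MERCHANT_DOMAINS:
            changed = True
            link["class"] = "affiliate"
            link["data-href-url"] = link["href"]
            link["data-affiliate-url"] = make_affiliate_url(link["href"])
    # Only reserialize when something changed, mirroring the update_text flag.
    return str(soup) if changed else html

print(tag_affiliate_links('<p><a href="https://example-store.com/item">buy</a></p>'))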
Example #2: safemarkdown (affiliate link tagging)
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    from r2.lib.utils import generate_affiliate_link, domain
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)
    to_affiliate = kwargs.get("affiliate", False)
    if to_affiliate:
        soup = BeautifulSoup(text.decode('utf-8'))
        links = soup.findAll('a')
        update_text = False

        def detect_affiliate(markdown_link):
            return domain(markdown_link.get('href'))\
                    in g.merchant_affiliate_domains

        for link in filter(detect_affiliate, links):
            update_text = True
            link['class'] = 'affiliate'
            link['data-href-url'] = link.get('href')
            link['data-affiliate-url'] = generate_affiliate_link(
                                            link.get('href')
                                         )

        if update_text:
            text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Example #3: safemarkdown (basic)
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
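In this plain variant, all the work outside snudown.markdown() is the wrapping. SC_OFF/SC_ON and MD_START/MD_END are module-level constants in r2/lib/filters.py; the sketch below shows what the wrapping amounts to with assumed values (SC_OFF/SC_ON as HTML comments bracketing the rendered region, MD_START/MD_END as the div.md wrapper), so treat the exact strings as an assumption rather than the authoritative definitions.

# Assumed values; check r2/lib/filters.py for the real definitions.
SC_OFF = "<!-- SC_OFF -->"
SC_ON = "<!-- SC_ON -->"
MD_START = '<div class="md">'
MD_END = '</div>'

def wrap_rendered(html, wrap=True):
    # Mirrors the tail of safemarkdown(): wrap=True adds the div.md wrapper,
    # wrap=False emits only the SC_OFF/SC_ON markers around the rendered HTML.
    if wrap:
        return SC_OFF + MD_START + html + MD_END + SC_ON
    return SC_OFF + html + SC_ON

print(wrap_rendered("<p>hello</p>"))
# -> <!-- SC_OFF --><div class="md"><p>hello</p></div><!-- SC_ON -->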
Example #4: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True

    text = snudown.markdown(_force_utf8(text),
                            nofollow,
                            target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')

    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)

    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
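wikimarkdown renders with the wiki renderer and then post-processes the HTML: image tags whose src is a %%name%% placeholder are swapped for the subreddit's uploaded stylesheet images (anything else is stripped), reddit links get add_sr treatment in the compact render style, and an optional table of contents is prepended. A standalone sketch of the image-swap pass follows; CUSTOM_IMG_URL is an assumed placeholder pattern standing in for the module-level custom_img_url regex, and page_images stands in for the ImagesByWikiPage lookup.

import re

from bs4 import BeautifulSoup

# Assumed placeholder pattern; the real custom_img_url regex is defined at
# module level in r2/lib/filters.py.
CUSTOM_IMG_URL = re.compile(r"\A%%([a-zA-Z0-9\-]+)%%\Z")

def swap_wiki_images(html, page_images):
    """Replace %%name%% image placeholders with uploaded URLs; drop the rest."""
    soup = BeautifulSoup(html, "html.parser")
    for tag in soup.find_all("img"):
        match = CUSTOM_IMG_URL.search(tag.get("src", ""))
        name = match and match.group(1)
        if name and name in page_images:
            tag["src"] = page_images[name]  # the original also makes this protocol-relative
        else:
            tag.extract()  # unknown or non-placeholder images are removed entirely
    return str(soup)

html = '<p><img src="%%banner%%"/><img src="http://elsewhere.example/x.png"/></p>'
print(swap_wiki_images(html, {"banner": "//i.example.com/banner.png"}))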
Example #5: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Example #6: safemarkdown (explicit target / cname handling)
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    # this lets us skip the c.cname lookup (which is apparently quite
    # slow) if target was explicitly passed to this function.
    target = kwargs.get("target", None)
    if "target" not in kwargs and c.cname:
        target = "_top"

    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
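This variant only adds the target handling. Because the check is "target" not in kwargs rather than a default argument value, explicitly passing target=None still skips the c.cname lookup that the comment warns is slow; only omitting the keyword entirely triggers it. Below is a small standalone illustration of that sentinel pattern, with FakeContext standing in for the pylons request-global c.

class FakeContext(object):
    # Stand-in for the pylons request-global c used by the real code.
    cname = True

c = FakeContext()

def resolve_target(**kwargs):
    """Mirror of the target logic above: consult c.cname only when the caller
    did not pass target at all; target=None still counts as passed."""
    target = kwargs.get("target", None)
    if "target" not in kwargs and c.cname:
        target = "_top"
    return target

print(resolve_target())                 # '_top'   (cname consulted)
print(resolve_target(target=None))      # None     (lookup skipped)
print(resolve_target(target="_blank"))  # '_blank'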
Example #7: emailmarkdown
def emailmarkdown(text, wrap=True):
    if not text:
        return None

    text = snudown.markdown(_force_utf8(text))

    soup = BeautifulSoup(text.decode('utf-8'))
    links = soup.findAll('a')
    update_text = False
    base = g.https_endpoint or g.origin

    for link in links:
        # if link is relative
        if link['href'].startswith('/'):
            update_text = True
            link['href'] = urljoin(base, link['href'])

    if update_text:
        text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
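emailmarkdown differs from safemarkdown in that it rewrites relative links into absolute ones, since the HTML will be viewed in a mail client with no base URL; the base comes from g.https_endpoint or g.origin. A minimal standalone sketch of that pass, using bs4 and an assumed example base URL in place of the g globals:

from urllib.parse import urljoin  # urlparse.urljoin in the Python 2 original

from bs4 import BeautifulSoup

def absolutize_links(html, base="https://www.reddit.com"):
    """Rewrite relative hrefs against a base URL, as emailmarkdown does."""
    soup = BeautifulSoup(html, "html.parser")
    changed = False
    for link in soup.find_all("a", href=True):
        if link["href"].startswith("/"):
            changed = True
            link["href"] = urljoin(base, link["href"])
    # Reserialize only when a link was rewritten, mirroring update_text.
    return str(soup) if changed else html

print(absolutize_links('<p><a href="/r/pics">pics</a></p>'))
# -> <p><a href="https://www.reddit.com/r/pics">pics</a></p>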