Example #1
    def purge_url(self, url):
        """Purge an image (by url) from imgix.

        Reference: http://www.imgix.com/docs/tutorials/purging-images

        Note that as mentioned in the imgix docs, in order to remove
        an image, this function should be used *after* already
        removing the image from our source, or imgix will just re-fetch
        and replace the image with a new copy even after purging.
        """

        p = UrlParser(url)

        if p.hostname == g.imgix_domain:
            p.hostname = g.imgix_purge_domain
        elif p.hostname == g.imgix_gif_domain:
            p.hostname = g.imgix_gif_purge_domain

        url = p.unparse()

        requests.post(
            "https://api.imgix.com/v2/image/purger",
            auth=(g.secrets["imgix_api_key"], ""),
            data={"url": url},
        )
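
The ordering requirement in the docstring is easy to miss, so here is a hedged usage sketch. delete_from_source and media_provider are hypothetical names standing in for whatever removes the image from the origin store and whichever object exposes purge_url; they are not part of the code above.

    # Illustrative call order only:
    delete_from_source(image_url)        # hypothetical: remove the origin copy first,
                                         # or imgix will just re-fetch it after the purge
    media_provider.purge_url(image_url)  # then ask imgix to drop its cached copy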
Example #2
    def test_url_mutation(self):
        u = UrlParser("http://example.com/")
        u.hostname = g.domain
        self.assertTrue(u.is_reddit_url())

        u = UrlParser("http://%s/" % g.domain)
        u.hostname = "example.com"
        self.assertFalse(u.is_reddit_url())
Example #3
    def test_url_mutation(self):
        u = UrlParser("http://example.com/")
        u.hostname = g.domain
        self.assertTrue(u.is_reddit_url())

        u = UrlParser("http://%s/" % g.domain)
        u.hostname = "example.com"
        self.assertFalse(u.is_reddit_url())
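
The two tests above only exercise the hostname setter; is_reddit_url itself is not shown anywhere in this listing. As a rough sketch of the check the tests imply (an assumption, not the real implementation), the decision boils down to whether the hostname is g.domain or one of its subdomains; the domain values in the comments are made up.

def _is_reddit_hostname(hostname, base_domain):
    # Illustrative stand-in for is_reddit_url's hostname check.
    return hostname == base_domain or hostname.endswith("." + base_domain)

# _is_reddit_hostname("example.com", "reddit.local")   -> False
# _is_reddit_hostname("reddit.local", "reddit.local")  -> True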
Example #4
    def resize_image(self, image, width=None, file_type=None, censor_nsfw=False,
                     max_ratio=None):
        url = UrlParser(image['url'])
        is_gif = url.path.endswith('.gif') and (file_type == 'mp4' or not file_type)

        if is_gif:
            url.hostname = g.imgix_gif_domain
        else:
            url.hostname = g.imgix_domain

        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        # g.s3_media_direct affects how preview image urls are stored
        # True: http://{s3_media_domain}/mybucket/helloworld.jpg
        # False: http://mybucket/helloworld.jpg
        # If it's True, we'll need to strip the bucket out of the path
        if g.s3_media_direct:
            path_parts = url.path.split('/')
            path_parts.pop(1)
            url.path = '/'.join(path_parts)

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)

        if file_type and file_type in ('gif', 'jpg', 'png', 'mp4'):
            url.update_query(fm=file_type)

        # We need to disable fragmented mp4s for proper playback in Firefox
        if file_type == 'mp4':
            url.update_query(**{'mp4-fragmented': 'false'})

        if censor_nsfw:
            # Do an initial blur to make sure we're getting rid of icky
            # details.
            #
            # http://www.imgix.com/docs/reference/stylize#param-blur
            url.update_query(blur=600)
            # And then add pixellation to help the image compress well.
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=32)
        if g.imgix_signing:
            if is_gif:
                url = self._sign_url(url, g.secrets['imgix_gif_signing_token'])
            else:
                url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
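
A hedged usage sketch of this variant, requesting an mp4 rendition of a GIF preview. The provider variable and the image dict contents are assumptions; the query parameters in the comment follow directly from the branches above, though their order in the final URL may differ.

    # Illustrative only:
    image = {'url': 'http://my-bucket.example/previews/cat.gif', 'width': 640}
    src = provider.resize_image(image, width=320, file_type='mp4')
    # Roughly: https://<g.imgix_gif_domain>/previews/cat.gif?w=320&fm=mp4&mp4-fragmented=false
    # (plus an s=<signature> parameter when g.imgix_signing is enabled)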
Example #5
def add_sr(path,
           sr_path=True,
           nocname=False,
           force_hostname=False,
           retain_extension=True,
           force_https=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.

     * nocname: when updating the hostname, overrides the value of
       c.cname to set the hostname to g.domain.  The default behavior
       is to set the hostname consistent with c.cname.

     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path, and subject to the
       c.cname/nocname combination.  If false, the path will still
       have its domain updated if no hostname is specified in the url.

     * retain_extension: if True, sets the extension according to
       c.render_style.

     * force_https: force the URL scheme to https

    For caching purposes: note that this function uses:
      c.cname, c.render_style, c.site.name
    """
    # don't do anything if it is just an anchor
    if path.startswith(('#', 'javascript:')):
        return path

    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        if c.secure:
            u.hostname = request.host
        else:
            u.hostname = get_domain(cname=(c.cname and not nocname),
                                    subreddit=False)

    if (c.secure and u.is_reddit_url()) or force_https:
        u.scheme = "https"

    if retain_extension:
        if c.render_style == 'mobile':
            u.set_extension('mobile')

        elif c.render_style == 'compact':
            u.set_extension('compact')

    return u.unparse()
Example #6
def add_sr(
        path, sr_path=True, nocname=False, force_hostname=False,
        retain_extension=True, force_https=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.

     * nocname: when updating the hostname, overrides the value of
       c.cname to set the hostname to g.domain.  The default behavior
       is to set the hostname consistent with c.cname.

     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path, and subject to the
       c.cname/nocname combination.  If false, the path will still
       have its domain updated if no hostname is specified in the url.

     * retain_extension: if True, sets the extension according to
       c.render_style.

     * force_https: force the URL scheme to https

    For caching purposes: note that this function uses:
      c.cname, c.render_style, c.site.name
    """
    # don't do anything if it is just an anchor
    if path.startswith(('#', 'javascript:')):
        return path

    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        if c.secure:
            u.hostname = request.host
        else:
            u.hostname = get_domain(cname = (c.cname and not nocname),
                                    subreddit = False)

    if (c.secure and u.is_reddit_url()) or force_https:
        u.scheme = "https"

    if retain_extension:
        if c.render_style == 'mobile':
            u.set_extension('mobile')

        elif c.render_style == 'compact':
            u.set_extension('compact')

    return u.unparse()
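
A short usage sketch for add_sr. The values are illustrative: assume the request is being rendered for a subreddit whose path is /r/pics/, no cname is in play, and the input path carries no hostname of its own.

# Illustrative only; real output depends on c.site, c.cname, c.secure and c.render_style.
add_sr("/comments/abc123/")
#   -> "//<domain>/r/pics/comments/abc123/" style URL: the subreddit path is
#      prepended and the hostname filled in because the input had none
add_sr("/comments/abc123/", force_https=True)
#   -> same URL with the scheme forced to https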
Example #7
    def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
        url = UrlParser(image['url'])
        url.hostname = g.imgix_domain
        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)
        if censor_nsfw:
            # Since we aren't concerned with inhibiting a user's ability to
            # reverse the censoring for privacy reasons, pixellation is better
            # than a Gaussian blur because it compresses well.  The specific
            # value is just "what looks about right".
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=20)
        if g.imgix_signing:
            url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
Example #8
    def resize_image(self,
                     image,
                     width=None,
                     censor_nsfw=False,
                     max_ratio=None):
        url = UrlParser(image['url'])
        url.hostname = g.imgix_domain
        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)
        if censor_nsfw:
            # Do an initial blur to make sure we're getting rid of icky
            # details.
            #
            # http://www.imgix.com/docs/reference/stylize#param-blur
            url.update_query(blur=600)
            # And then add pixellation to help the image compress well.
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=32)
        if g.imgix_signing:
            url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
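
_sign_url is referenced by several of these resize_image variants but never shown. As background, here is a sketch of imgix-style URL signing as imgix documents it (an MD5 of the signing token concatenated with the path and, if present, the query string, appended as the s parameter); the real helper may differ in details and in how it reads the query back out of the UrlParser.

import hashlib

def sign_imgix_url_sketch(path, query, token):
    # Illustrative only, following imgix's published signing scheme:
    # digest = MD5(token + path [+ "?" + query]), sent as ...&s=<digest>
    base = token + path
    if query:
        base += "?" + query
    return hashlib.md5(base.encode("utf-8")).hexdigest()

# e.g. sign_imgix_url_sketch("/previews/cat.jpg", "w=320&fm=jpg", token)
# would be appended to the imgix URL as an s= query parameter.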
Example #9
def add_sr(path, sr_path=True, nocname=False, force_hostname=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path, and subject to the
       c.cname/nocname combination.  If false, the path will still
       have its domain updated if no hostname is specified in the url.
    
     * nocname: when updating the hostname, overrides the value of
       c.cname to set the hostname to g.domain.  The default behavior
       is to set the hostname consistent with c.cname.

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.
    """
    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        u.hostname = get_domain(cname=(c.cname and not nocname),
                                subreddit=False)

    if c.render_style == 'mobile':
        u.set_extension('mobile')

    return u.unparse()
Example #10
    def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
        url = UrlParser(image['url'])
        url.hostname = g.imgix_domain
        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)
        if censor_nsfw:
            # Do an initial blur to make sure we're getting rid of icky
            # details.
            #
            # http://www.imgix.com/docs/reference/stylize#param-blur
            url.update_query(blur=600)
            # And then add pixellation to help the image compress well.
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=32)
        if g.imgix_signing:
            url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
Example #11
    def POST_request_promo(self, srnames):
        if not srnames:
            return

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        res_by_campaign = {r.campaign: r for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_click_url = r.click_url
            w.num = ""
            return spaceCompress(w.render())
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Example #12
def add_sr(path, sr_path = True, nocname=False, force_hostname = False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * force_hostname: if True, force the url's hotname to be updated
     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path, and subject to the
       c.cname/nocname combination.  If false, the path will still
       have its domain updated if no hostname is specified in the url.
    
     * nocname: when updating the hostname, overrides the value of
       c.cname to set the hostname to g.domain.  The default behavior
       is to set the hostname consistent with c.cname.

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.
    """
    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        u.hostname = get_domain(cname = (c.cname and not nocname),
                                subreddit = False)

    if c.render_style == 'mobile':
        u.set_extension('mobile')

    return u.unparse()
Example #13
    def POST_request_promo(self, srnames, is_mobile_web, platform, loid,
                           is_refresh):
        self.OPTIONS_request_promo()

        if not srnames:
            return

        # backwards compat
        if platform is None:
            platform = "mobile_web" if is_mobile_web else "desktop"

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames,
                                  self.get_uid(loid),
                                  platform=platform)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        # for adservers, adzerk returns markup so we pass it to the client
        if isinstance(response, AdserverResponse):
            g.stats.simple_event('adzerk.request.adserver')
            return responsive(response.body)

        res_by_campaign = {r.campaign: r for r in response}
        adserver_click_urls = {r.campaign: r.click_url for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples,
                                  wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(listing.things,
                             c.site,
                             adserver_click_urls=adserver_click_urls)
        promote.update_served(listing.things)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            if is_refresh:
                g.stats.simple_event('adzerk.request.auto_refresh')

            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_upvote_pixel = r.upvote_pixel
            w.adserver_downvote_pixel = r.downvote_pixel
            w.adserver_click_url = r.click_url
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Example #14
def add_sr(path,
           sr_path=True,
           nocname=False,
           force_hostname=False,
           retain_extension=True,
           force_https=False,
           force_extension=None):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.

     * nocname: deprecated.

     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path. If false, the path will still
       have its domain updated if no hostname is specified in the url.

     * retain_extension: if True, sets the extension according to
       c.render_style.

     * force_https: force the URL scheme to https

    For caching purposes: note that this function uses:
      c.render_style, c.site.name

    """
    # don't do anything if it is just an anchor
    if path.startswith(('#', 'javascript:')):
        return path

    u = UrlParser(path)
    if sr_path:
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        u.hostname = get_domain(subreddit=False)

    if (c.secure and u.is_reddit_url()) or force_https:
        u.scheme = "https"

    if force_extension is not None:
        u.set_extension(force_extension)
    elif retain_extension:
        if c.render_style == 'mobile':
            u.set_extension('mobile')

        elif c.render_style == 'compact':
            u.set_extension('compact')

        # SaidIt CUSTOM
        elif c.render_style == g.extension_subdomain_mobile_v2_render_style:
            u.set_extension(g.extension_subdomain_mobile_v2_render_style)

    return u.unparse()
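
A brief usage sketch of the force_extension parameter added in this variant. The paths and values are illustrative; the key point is that force_extension wins over retain_extension/c.render_style, and an empty string presumably clears the extension outright.

# Illustrative only:
add_sr("/r/pics/about", force_extension="json")   # always renders as .json
add_sr("/r/pics/about.json", force_extension="")  # strips the extension, even on mobile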
Example #15
 def _key_from_url(cls, url):
     if not utils.domain(url) in g.case_sensitive_domains:
         keyurl = _force_utf8(UrlParser.base_url(url.lower()))
     else:
         # Convert only hostname to lowercase
         up = UrlParser(url)
         up.hostname = up.hostname.lower()
         keyurl = _force_utf8(UrlParser.base_url(up.unparse()))
     return keyurl
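
A hedged illustration of what this buys: dedup keys are case-insensitive for the whole URL unless the domain is listed in g.case_sensitive_domains, in which case only the hostname is folded. The class name Link and the domains below are assumptions made for the sake of the example.

# Illustrative only; "example.org" stands in for an entry in g.case_sensitive_domains.
Link._key_from_url("http://Example.COM/Some/Path")
#   -> keyed as if it were "http://example.com/some/path"  (whole URL lowercased)
Link._key_from_url("http://Example.ORG/Some/Path")
#   -> keyed as if it were "http://example.org/Some/Path"  (hostname only lowercased)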
Example #16
 def _key_from_url(cls, url):
     if not utils.domain(url) in g.case_sensitive_domains:
         keyurl = _force_utf8(UrlParser.base_url(url.lower()))
     else:
         # Convert only hostname to lowercase
         up = UrlParser(url)
         up.hostname = up.hostname.lower()
         keyurl = _force_utf8(UrlParser.base_url(up.unparse()))
     return keyurl
Example #17
def redirect_to_host(hostname, path=None):
    """Redirect (307) to the specified path and host."""
    if path is None:
        path = request.path

    u = UrlParser(path)
    u.hostname = hostname

    # 307 redirect so request method is retained
    abort(307, location=u.unparse())
Example #18
def redirect_to_host(hostname, path=None):
    """Redirect (307) to the specified path and host."""
    if path is None:
        path = request.path

    u = UrlParser(path)
    u.hostname = hostname

    # 307 redirect so request method is retained
    abort(307, location=u.unparse())
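
A minimal usage sketch: bounce a request that arrived on a non-canonical host over to g.domain. Because the helper issues a 307, the method and body of a POST survive the redirect. The condition shown is illustrative, not taken from the codebase.

# Illustrative only:
if request.host != g.domain:
    redirect_to_host(g.domain)        # re-issues the same request.path on g.domain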
Example #19
    def POST_request_promo(self, srnames, is_mobile_web, platform, loid, is_refresh):
        self.OPTIONS_request_promo()

        if not srnames:
            return

        # backwards compat
        if platform is None:
            platform = "mobile_web" if is_mobile_web else "desktop"

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames, self.get_uid(loid),
                                  platform=platform)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        # for adservers, adzerk returns markup so we pass it to the client
        if isinstance(response, AdserverResponse):
            g.stats.simple_event('adzerk.request.adserver')
            return responsive(response.body)

        res_by_campaign = {r.campaign: r for r in response}
        adserver_click_urls = {r.campaign: r.click_url for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(listing.things, c.site, adserver_click_urls=adserver_click_urls)
        promote.update_served(listing.things)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            if is_refresh:
                g.stats.simple_event('adzerk.request.auto_refresh')

            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_upvote_pixel = r.upvote_pixel
            w.adserver_downvote_pixel = r.downvote_pixel
            w.adserver_click_url = r.click_url
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Example #20
 def __init__(self, original_path, subreddit, sub_domain):
     Wrapped.__init__(self, original_path=original_path)
     if sub_domain and subreddit and original_path:
         self.title = "%s - %s" % (subreddit.title, sub_domain)
         u = UrlParser(subreddit.path + original_path)
         u.hostname = get_domain(cname = False, subreddit = False)
         u.update_query(**request.get.copy())
         u.put_in_frame()
         self.frame_target = u.unparse()
     else:
         self.title = ""
         self.frame_target = None
Example #21
File: pages.py Project: vin/reddit
 def __init__(self, original_path, subreddit, sub_domain):
     Wrapped.__init__(self, original_path=original_path)
     if sub_domain and subreddit and original_path:
         self.title = "%s - %s" % (subreddit.title, sub_domain)
         u = UrlParser(subreddit.path + original_path)
         u.hostname = get_domain(cname = False, subreddit = False)
         u.update_query(**request.get.copy())
         u.put_in_frame()
         self.frame_target = u.unparse()
     else:
         self.title = ""
         self.frame_target = None
Example #22
    def test_same_url(self):
        u = UrlParser('http://example.com:8000/a;b?foo=bar&bar=baz#spam')
        u2 = UrlParser('http://example.com:8000/a;b?bar=baz&foo=bar#spam')
        self.assertEquals(u, u2)

        u3 = UrlParser('')
        u3.scheme = 'http'
        u3.hostname = 'example.com'
        u3.port = 8000
        u3.path = '/a'
        u3.params = 'b'
        u3.update_query(foo='bar', bar='baz')
        u3.fragment = 'spam'
        self.assertEquals(u, u3)
Example #23
    def test_same_url(self):
        u = UrlParser('http://example.com:8000/a;b?foo=bar&bar=baz#spam')
        u2 = UrlParser('http://example.com:8000/a;b?bar=baz&foo=bar#spam')
        self.assertEquals(u, u2)

        u3 = UrlParser('')
        u3.scheme = 'http'
        u3.hostname = 'example.com'
        u3.port = 8000
        u3.path = '/a'
        u3.params = 'b'
        u3.update_query(foo='bar', bar='baz')
        u3.fragment = 'spam'
        self.assertEquals(u, u3)
Example #24
    def test_replace_subreddit(self):
        test_cases = [
            ('/r/VIDEOS/', '/r/videos/', '/r/videos/'),
            ('/r/VIDEOS/new/', '/r/videos/', '/r/videos/new/'),
            ('/r/VIDEOS/new/#cats', '/r/videos/', '/r/videos/new/#cats'),
            ('/user/dave/m/cats/', '', '/user/dave/m/cats/'),
        ]

        for test_url, user_path, canonical_url in test_cases:
            subreddit = mock.create_autospec(BaseSite, spec_set=True)
            subreddit.user_path = user_path

            url = UrlParser(test_url).canonicalize_subreddit_path(subreddit)
            url.hostname = 'reddit'
            print test_url, user_path, canonical_url
            self.assertTrue(
                url.is_canonically_equivalent(canonical_url),
                '{0} is not equivalent to {1}'.format(
                    url, UrlParser(canonical_url)),
            )
Example #25
    def test_replace_subreddit(self):
        test_cases = [
            ('/r/VIDEOS/', '/r/videos/', '/r/videos/'),
            ('/r/VIDEOS/new/', '/r/videos/', '/r/videos/new/'),
            ('/r/VIDEOS/new/#cats', '/r/videos/', '/r/videos/new/#cats'),
            ('/user/dave/m/cats/', '', '/user/dave/m/cats/'),
        ]

        for test_url, user_path, canonical_url in test_cases:
            subreddit = mock.create_autospec(BaseSite, spec_set=True)
            subreddit.user_path = user_path

            url = UrlParser(test_url).canonicalize_subreddit_path(subreddit)
            url.hostname = 'reddit'
            print test_url, user_path, canonical_url
            self.assertTrue(
                url.is_canonically_equivalent(canonical_url),
                '{0} is not equivalent to {1}'.format(
                    url, UrlParser(canonical_url)),
            )
Example #26
    def test_is_canonically_equivalent(self):

        test_cases = [
            ('/cats', '/cats', False),
            ('/cats/', '/cats/', True),
            ('/cats/', 'https://www.reddit.com/cats/', True),
            ('/cats#grumpy', '/cats#grumpy', False),
            ('/cats/#grumpy', '/cats/#grumpy', True),
            ('/cats/#grumpy', '/cats', True),
            ('/dogs?people=False', '/dogs?people=False', False),
            ('/dogs/?people=False', '/dogs/?people=False', True),
            ('/dogs/?cute=True&people=False', '/dogs/?people=False&cute=True',
             True),
        ]

        for test_url, canonical_url, assertion in test_cases:
            parsed = UrlParser(test_url)
            # this is to get around the need to patch the global request
            # object with the appropriate hostname.
            parsed.hostname = 'reddit'
            self.assertEquals(parsed.is_canonically_equivalent(canonical_url),
                              assertion)
Example #27
def add_sr(path, sr_path=True, nocname=False, force_hostname=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

     * force_hostname: if True, force the url's hostname to be updated
       even if it is already set in the path, and subject to the
       c.cname/nocname combination.  If false, the path will still
       have its domain updated if no hostname is specified in the url.
    
     * nocname: when updating the hostname, overrides the value of
       c.cname to set the hostname to g.domain.  The default behavior
       is to set the hostname consistent with c.cname.

     * sr_path: if a cname is not used for the domain, updates the
       path to include c.site.path.

    For caching purposes: note that this function uses:
      c.cname, c.render_style, c.site.name
    """
    # don't do anything if it is just an anchor
    if path.startswith("#") or path.startswith("javascript:"):
        return path

    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)

    if not u.hostname or force_hostname:
        u.hostname = get_domain(cname=(c.cname and not nocname), subreddit=False)

    if c.render_style == "mobile":
        u.set_extension("mobile")

    elif c.render_style == "compact":
        u.set_extension("compact")

    return u.unparse()
Example #28
    def POST_request_promo(self, srnames, is_mobile_web):
        self.OPTIONS_request_promo()

        if not srnames:
            return

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames, mobile_web=is_mobile_web)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        res_by_campaign = {r.campaign: r for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples,
                                  wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(listing.things, c.site)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_click_url = r.click_url
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Example #29
    def test_is_canonically_equivalent(self):

        test_cases = [
            ('/cats', '/cats', False),
            ('/cats/', '/cats/', True),
            ('/cats/', 'https://www.reddit.com/cats/', True),
            ('/cats#grumpy', '/cats#grumpy', False),
            ('/cats/#grumpy', '/cats/#grumpy', True),
            ('/cats/#grumpy', '/cats', True),
            ('/dogs?people=False', '/dogs?people=False', False),
            ('/dogs/?people=False', '/dogs/?people=False', True),
            ('/dogs/?cute=True&people=False',
             '/dogs/?people=False&cute=True', True),
        ]

        for test_url, canonical_url, assertion in test_cases:
            parsed = UrlParser(test_url)
            # this is to get around the need to patch the global request
            # object with the appropriate hostname.
            parsed.hostname = 'reddit'
            self.assertEquals(
                parsed.is_canonically_equivalent(canonical_url),
                assertion
            )
Example #30
    def __init__(self, space_compress=None, nav_menus=None, loginbox=True,
                 infotext='', infotext_class=None, content=None,
                 short_description='', title='',
                 robots=None, show_sidebar=True, show_chooser=False,
                 footer=True, srbar=True, page_classes=None, short_title=None,
                 show_wiki_actions=False, extra_js_config=None,
                 show_locationbar=False,
                 **context):
        Templated.__init__(self, **context)
        self.title = title
        self.short_title = short_title
        self.short_description = short_description
        self.robots = robots
        self.infotext = infotext
        self.extra_js_config = extra_js_config
        self.show_wiki_actions = show_wiki_actions
        self.loginbox = loginbox
        self.show_sidebar = show_sidebar
        self.space_compress = space_compress
        # instantiate a footer
        self.footer = RedditFooter() if footer else None
        self.debug_footer = DebugFooter()
        self.supplied_page_classes = page_classes or []

        #put the sort menus at the top
        self.nav_menu = MenuArea(menus = nav_menus) if nav_menus else None

        #add the infobar
        self.welcomebar = None
        self.newsletterbar = None
        self.locationbar = None
        self.infobar = None
        self.mobilewebredirectbar = None

        # generate a canonical link for google
        self.canonical_link = request.fullpath
        if c.render_style != "html":
            u = UrlParser(request.fullpath)
            u.set_extension("")
            u.hostname = g.domain
            if g.domain_prefix:
                u.hostname = "%s.%s" % (g.domain_prefix, u.hostname)
            self.canonical_link = u.unparse()
        # Generate a mobile link for Google.
        u = UrlParser(request.fullpath)
        u.switch_subdomain_by_extension('mobile')
        u.scheme = 'https'
        self.mobile_link = u.unparse()

        if self.show_infobar:
            if not infotext:
                if g.heavy_load_mode:
                    # heavy load mode message overrides read only
                    infotext = strings.heavy_load_msg
                elif g.read_only_mode:
                    infotext = strings.read_only_msg
                elif g.live_config.get("announcement_message"):
                    infotext = g.live_config["announcement_message"]

            if infotext:
                self.infobar = InfoBar(
                    message=infotext, extra_class=infotext_class)
            elif (isinstance(c.site, DomainSR) and
                    is_subdomain(c.site.domain, "imgur.com")):
                self.infobar = InfoBar(message=
                    _("imgur.com domain listings (including this one) are "
                      "currently disabled to speed up vote processing.")
                )
            elif isinstance(c.site, AllMinus) and not c.user.gold:
                self.infobar = InfoBar(message=strings.all_minus_gold_only,
                                       extra_class="gold")

            if not c.user_is_loggedin:
                self.welcomebar = WelcomeBar()
                if feature.is_enabled('newsletter') and getattr(self, "show_newsletterbar", True):
                    self.newsletterbar = NewsletterBar()

            if c.render_style == "compact":
                self.mobilewebredirectbar = MobileWebRedirectBar()

            show_locationbar &= not c.user.pref_hide_locationbar
            if (show_locationbar and c.used_localized_defaults and
                    (not c.user_is_loggedin or
                     not c.user.has_subscribed)):
                self.locationbar = LocationBar()

        self.srtopbar = None
        if srbar and not c.cname and not is_api():
            self.srtopbar = SubredditTopBar()

        panes = [content]

        if c.user_is_loggedin and not is_api() and not self.show_wiki_actions:
            # insert some form templates for js to use
            # TODO: move these to client side templates
            gold_link = GoldPayment("gift",
                                    "monthly",
                                    months=1,
                                    signed=False,
                                    recipient="",
                                    giftmessage=None,
                                    passthrough=None,
                                    thing=None,
                                    clone_template=True,
                                    thing_type="link",
                                   )
            gold_comment = GoldPayment("gift",
                                       "monthly",
                                       months=1,
                                       signed=False,
                                       recipient="",
                                       giftmessage=None,
                                       passthrough=None,
                                       thing=None,
                                       clone_template=True,
                                       thing_type="comment",
                                      )
            report_form = ReportForm()

            if not feature.is_enabled('improved_sharing'):
                panes.append(ShareLink())

            panes.append(report_form)

            if self.show_sidebar:
                panes.extend([gold_comment, gold_link])

            if c.user_is_sponsor:
                panes.append(FraudForm())

        self._content = PaneStack(panes)

        self.show_chooser = (
            show_chooser and
            c.render_style == "html" and
            c.user_is_loggedin and
            (
                isinstance(c.site, (DefaultSR, AllSR, ModSR, LabeledMulti)) or
                c.site.name == g.live_config["listing_chooser_explore_sr"]
            )
        )

        self.toolbars = self.build_toolbars()

        has_style_override = (c.user.pref_default_theme_sr and
                feature.is_enabled('stylesheets_everywhere') and
                c.user.pref_enable_default_themes)
        # if there is no style or the style is disabled for this subreddit
        self.no_sr_styles = (isinstance(c.site, DefaultSR) or
            (not self.get_subreddit_stylesheet_url(c.site) and not c.site.header) or
            (c.user and not c.user.use_subreddit_style(c.site)))

        self.default_theme_sr = DefaultSR()
        # use override stylesheet if they have custom styles disabled or
        # this subreddit has no custom stylesheet (or is the front page)
        if self.no_sr_styles:
            self.subreddit_stylesheet_url = self.get_subreddit_stylesheet_url(
                self.default_theme_sr)
        else:
            self.subreddit_stylesheet_url = self.get_subreddit_stylesheet_url(c.site)

        if has_style_override and self.no_sr_styles:
            sr = Subreddit._by_name(c.user.pref_default_theme_sr)
            # make sure they can still view their override subreddit
            if sr.can_view(c.user) and sr.stylesheet_url:
                self.subreddit_stylesheet_url = self.get_subreddit_stylesheet_url(sr)
                if c.can_apply_styles and c.allow_styles and sr.header:
                    self.default_theme_sr = sr
    def POST_request_promo(
        self,
        site,
        srnames,
        is_mobile_web,
        platform,
        loid,
        is_refresh,
        placements,
        displayed_things,
        referrer,
        user_day,
        user_hour,
        adblock,
    ):
        self.OPTIONS_request_promo()

        if (errors.INVALID_SITE_PATH, "site") in c.errors:
            return abort(reddit_http_error(400, errors.INVALID_SITE_PATH))

        displayed_list = displayed_things.split(',') if displayed_things else []
        if site:
            keywords = promote.keywords_from_context(
                c.user,
                site,
                displayed_things=displayed_list,
            )
        elif srnames:
            keywords = srnames.split('+')
        else:
            return

        properties = promote.properties_from_context(
            context=c,
            keywords=keywords,
            placement="sponsored_headline",
            displayed_things=displayed_list,
            exclude=(None if c.user_is_loggedin else ["age_hours"]),
        )

        properties['user_day'] = user_day
        properties['user_hour'] = user_hour

        if adblock is not None:
            properties["adblock"] = adblock

        # backwards compat
        if platform is None:
            platform = "mobile_web" if is_mobile_web else "desktop"

        if not placements:
            placements = ["div0"]

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(
            keywords=keywords,
            properties=properties,
            user_id=self.get_uid(loid),
            placement_names=placements,
            platform=platform,
            is_refresh=is_refresh,
            referrer=referrer,
            timeout=int(g.live_config.get("ads_loading_timeout_ms", 1000) / 1000.),
        )

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        # for blanks, adzerk returns markup so we pass it to the client
        if isinstance(response, BlankCreativeResponse):
            g.stats.simple_event('adzerk.request.blank')
            return responsive(response.body)

        res_by_campaign = {r.campaign: r for r in response}
        adserver_click_urls = {r.campaign: r.click_url for r in response}
        priorities = {r.campaign: r.priority for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(
            listing.things, c.site,
            adserver_click_urls=adserver_click_urls, priorities=priorities)
        promote.update_served(listing.things)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            if is_refresh:
                g.stats.simple_event('adzerk.request.auto_refresh')

            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_upvote_pixel = r.upvote_pixel
            w.adserver_downvote_pixel = r.downvote_pixel
            w.adserver_click_url = r.click_url
            w.ecpm = r.ecpm
            w.moat_query = r.moat_query
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
    def POST_request_promo(
        self,
        site,
        srnames,
        is_mobile_web,
        platform,
        loid,
        is_refresh,
        displayed_things,
        referrer,
    ):
        self.OPTIONS_request_promo()

        if (errors.INVALID_SITE_PATH, "site") in c.errors:
            return abort(reddit_http_error(400, errors.INVALID_SITE_PATH))

        displayed_list = displayed_things.split(',') if displayed_things else []
        if site:
            keywords = promote.keywords_from_context(
                c.user,
                site,
                displayed_things=displayed_list,
            )
        elif srnames:
            keywords = srnames.split('+')
        else:
            return

        properties = properties_from_context(c, site)

        # backwards compat
        if platform is None:
            platform = "mobile_web" if is_mobile_web else "desktop"

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(
            keywords=keywords,
            properties=properties,
            user_id=self.get_uid(loid),
            platform=platform,
            is_refresh=is_refresh,
            referrer=referrer,
        )

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        # for adservers, adzerk returns markup so we pass it to the client
        if isinstance(response, AdserverResponse):
            g.stats.simple_event('adzerk.request.adserver')
            return responsive(response.body)

        res_by_campaign = {r.campaign: r for r in response}
        adserver_click_urls = {r.campaign: r.click_url for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(listing.things, c.site, adserver_click_urls=adserver_click_urls)
        promote.update_served(listing.things)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            if is_refresh:
                g.stats.simple_event('adzerk.request.auto_refresh')

            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_upvote_pixel = r.upvote_pixel
            w.adserver_downvote_pixel = r.downvote_pixel
            w.adserver_click_url = r.click_url
            w.ecpm = r.ecpm
            w.moat_query = r.moat_query
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
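
Taken together, nearly every snippet above follows the same parse/mutate/unparse pattern. A distilled sketch, with illustrative values:

u = UrlParser("http://example.com/some/path?x=1")
u.hostname = "pixel.redditmedia.com"   # swap in the hostname you actually want
u.scheme = "https"                     # and any other components
u.update_query(extra="param")          # query params merge into the existing query
rewritten = u.unparse()                # reassemble the pieces into a URL string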