Example #1
    def POST_options(self, all_langs, pref_lang, **kw):
        #temporary. eventually we'll change pref_clickgadget to an
        #integer preference
        kw['pref_clickgadget'] = kw['pref_clickgadget'] and 5 or 0
        if c.user.pref_show_promote is None:
            kw['pref_show_promote'] = None
        elif not kw.get('pref_show_promote'):
            kw['pref_show_promote'] = False

        if not kw.get("pref_over_18") or not c.user.pref_over_18:
            kw['pref_no_profanity'] = True

        if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
            kw['pref_label_nsfw'] = True

        # default all the gold options to on if they don't have gold
        if not c.user.gold:
            for pref in ('pref_show_adbox', 'pref_show_sponsors',
                         'pref_show_sponsorships',
                         'pref_highlight_new_comments',
                         'pref_monitor_mentions'):
                kw[pref] = True

        self.set_options(all_langs, pref_lang, **kw)
        u = UrlParser(c.site.path + "prefs")
        u.update_query(done='true')
        if c.cname:
            u.put_in_frame()
        return self.redirect(u.unparse())
Example #2
def _update_redirect_uri(base_redirect_uri, params, as_fragment=False):
    parsed = UrlParser(base_redirect_uri)
    if as_fragment:
        parsed.fragment = urlencode(params)
    else:
        parsed.update_query(**params)
    return parsed.unparse()
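
For reference, a minimal stdlib sketch (Python 3 urllib.parse) of what this helper does, assuming UrlParser.update_query merges new parameters into any existing query string; this is a hypothetical standalone version, not reddit's UrlParser:

from urllib.parse import urlsplit, urlunsplit, parse_qsl, urlencode

def update_redirect_uri_sketch(base_redirect_uri, params, as_fragment=False):
    parts = urlsplit(base_redirect_uri)
    if as_fragment:
        # implicit-grant style flows carry token material in the fragment
        parts = parts._replace(fragment=urlencode(params))
    else:
        # merge into any existing query instead of replacing it
        query = dict(parse_qsl(parts.query))
        query.update(params)
        parts = parts._replace(query=urlencode(query))
    return urlunsplit(parts)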
Example #3
    def url_for_title(self, title):
        """Uses the MediaWiki API to get the URL for a wiki page
      with the given title"""
        if title is None:
            return None

        from pylons import g
        cache_key = ('wiki_url_%s' % title).encode('ascii', 'ignore')
        wiki_url = g.cache.get(cache_key)
        if wiki_url is None:
            # http://www.mediawiki.org/wiki/API:Query_-_Properties#info_.2F_in
            api = UrlParser(g.wiki_api_url)
            api.update_query(action='query',
                             titles=title,
                             prop='info',
                             format='yaml',
                             inprop='url')

            try:
                response = urlopen(api.unparse()).read()
                parsed_response = yaml.load(response, Loader=yaml.CLoader)
                page = parsed_response['query']['pages'][0]
            except Exception:
                return None

            wiki_url = page.get('fullurl').strip()

            # Things are created every couple of days so 12 hours seems
            # to be a reasonable cache time
            g.cache.set(cache_key, wiki_url, time=3600 * 12)

        return wiki_url
Example #4
File: post.py Project: vin/reddit
 def POST_options(self, all_langs, pref_lang, **kw):
     self.set_options(all_langs, pref_lang, **kw)
     u = UrlParser(c.site.path + "prefs")
     u.update_query(done='true')
     if c.cname:
         u.put_in_frame()
     return self.redirect(u.unparse())
Example #6
    def POST_options(self, all_langs, pref_lang, **kw):
        #temporary. eventually we'll change pref_clickgadget to an
        #integer preference
        kw['pref_clickgadget'] = kw['pref_clickgadget'] and 5 or 0
        if c.user.pref_show_promote is None:
            kw['pref_show_promote'] = None
        elif not kw.get('pref_show_promote'):
            kw['pref_show_promote'] = False

        if not kw.get("pref_over_18") or not c.user.pref_over_18:
            kw['pref_no_profanity'] = True

        if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
            kw['pref_label_nsfw'] = True

        if kw.get("avatar_img"):
            kw["pref_avatar_img"]= kw.get("avatar_img")


        # default all the gold options to on if they don't have gold
        if not c.user.gold:
            for pref in ('pref_show_adbox',
                         'pref_show_sponsors',
                         'pref_show_sponsorships',
                         'pref_highlight_new_comments',
                         'pref_monitor_mentions'):
                kw[pref] = True

        self.set_options(all_langs, pref_lang, **kw)
        u = UrlParser(c.site.path + "prefs")
        u.update_query(done='true')
        if c.cname:
            u.put_in_frame()
        return self.redirect(u.unparse())
Example #8
    def POST_options(self, all_langs, pref_lang, **kw):
        #temporary. eventually we'll change pref_clickgadget to an
        #integer preference
        kw['pref_clickgadget'] = kw['pref_clickgadget'] and 5 or 0
        if c.user.pref_show_promote is None:
            kw['pref_show_promote'] = None
        elif not kw.get('pref_show_promote'):
            kw['pref_show_promote'] = False

        if not kw.get("pref_over_18") or not c.user.pref_over_18:
            kw['pref_no_profanity'] = True

        if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
            kw['pref_label_nsfw'] = True

        if not c.user.gold:
            kw['pref_show_adbox'] = True
            kw['pref_show_sponsors'] = True
            kw['pref_show_sponsorships'] = True

        self.set_options(all_langs, pref_lang, **kw)
        u = UrlParser(c.site.path + "prefs")
        u.update_query(done = 'true')
        if c.cname:
            u.put_in_frame()
        return self.redirect(u.unparse())
Example #9
    def POST_options(self, all_langs, pref_lang, **kw):
        #temporary. eventually we'll change pref_clickgadget to an
        #integer preference
        kw['pref_clickgadget'] = kw['pref_clickgadget'] and 5 or 0
        if c.user.pref_show_promote is None:
            kw['pref_show_promote'] = None
        elif not kw.get('pref_show_promote'):
            kw['pref_show_promote'] = False

        if not kw.get("pref_over_18") or not c.user.pref_over_18:
            kw['pref_no_profanity'] = True

        if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
            kw['pref_label_nsfw'] = True

        if not c.user.gold:
            kw['pref_show_adbox'] = True
            kw['pref_show_sponsors'] = True

        self.set_options(all_langs, pref_lang, **kw)
        u = UrlParser(c.site.path + "prefs")
        u.update_query(done='true')
        if c.cname:
            u.put_in_frame()
        return self.redirect(u.unparse())
Example #10
    def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
        url = UrlParser(image['url'])
        url.hostname = g.imgix_domain
        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)
        if censor_nsfw:
            # Since we aren't concerned with inhibiting a user's ability to
            # reverse the censoring for privacy reasons, pixellation is better
            # than a Gaussian blur because it compresses well.  The specific
            # value is just "what looks about right".
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=20)
        if g.imgix_signing:
            url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
Example #11
def make_feedurl(user, path, ext="rss"):
    try:
        u = UrlParser(path)
        u.update_query(user=user.name, feed=make_feedhash(user, path))
        u.set_extension(ext)
        return u.unparse()
    except Exception:
        # fall back to the unmodified path if the URL can't be parsed
        return path
Example #12
 def make_anchored_permalink(self, link=None, sr=None, context=1, anchor=None):
     if link:
         permalink = UrlParser(self.make_permalink(link, sr))
     else:
         permalink = UrlParser(self.make_permalink_slow())
     permalink.update_query(context=context)
     permalink.fragment = anchor if anchor else self._id36
     return permalink.unparse()
Example #14
 def POST_options(self, all_langs, **prefs):
     filter_prefs(prefs, c.user)
     if c.errors.errors:
         return abort(BadRequestError(errors.INVALID_PREF))
     set_prefs(c.user, prefs)
     c.user._commit()
     u = UrlParser(c.site.path + "prefs")
     u.update_query(done='true')
     if c.cname:
         u.put_in_frame()
     return self.redirect(u.unparse())
Example #16
    def POST_options(self, all_langs, pref_lang, **kw):
        #temporary. eventually we'll change pref_clickgadget to an
        #integer preference
        kw['pref_clickgadget'] = kw['pref_clickgadget'] and 5 or 0

        self.set_options(all_langs, pref_lang, **kw)
        u = UrlParser(c.site.path + "prefs")
        u.update_query(done='true')
        if c.cname:
            u.put_in_frame()
        return self.redirect(u.unparse())
Example #17
    def test_sign_url(self):
        u = UrlParser('http://examples.imgix.net/frog.jpg?w=100')
        signed_url = self.provider._sign_url(u, 'abcdef')
        self.assertEqual(signed_url.unparse(),
                'http://examples.imgix.net/frog.jpg?w=100&s=cd3bdf071108af73b15c21bdcee5e49c')

        u = UrlParser('http://examples.imgix.net/frog.jpg')
        u.update_query(w=100)
        signed_url = self.provider._sign_url(u, 'abcdef')
        self.assertEqual(signed_url.unparse(),
                'http://examples.imgix.net/frog.jpg?w=100&s=cd3bdf071108af73b15c21bdcee5e49c')
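
The expected s parameter follows imgix's documented URL signing scheme; below is a hedged sketch of how such a signature can be computed (an assumption about what _sign_url does, not its actual source):

import hashlib
from urllib.parse import urlsplit

def sign_imgix_url_sketch(url, token):
    # per the imgix docs, the signature is MD5(token + path + "?" + query),
    # appended as a trailing "s" query parameter
    parts = urlsplit(url)
    base = token + parts.path
    if parts.query:
        base += '?' + parts.query
    signature = hashlib.md5(base.encode('utf-8')).hexdigest()
    separator = '&' if parts.query else '?'
    return url + separator + 's=' + signature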
Example #19
File: pages.py Project: vin/reddit
 def __init__(self, original_path, subreddit, sub_domain):
     Wrapped.__init__(self, original_path=original_path)
     if sub_domain and subreddit and original_path:
         self.title = "%s - %s" % (subreddit.title, sub_domain)
         u = UrlParser(subreddit.path + original_path)
     u.hostname = get_domain(cname=False, subreddit=False)
         u.update_query(**request.get.copy())
         u.put_in_frame()
         self.frame_target = u.unparse()
     else:
         self.title = ""
         self.frame_target = None
Example #20
    def test_same_url(self):
        u = UrlParser('http://example.com:8000/a;b?foo=bar&bar=baz#spam')
        u2 = UrlParser('http://example.com:8000/a;b?bar=baz&foo=bar#spam')
        self.assertEqual(u, u2)

        u3 = UrlParser('')
        u3.scheme = 'http'
        u3.hostname = 'example.com'
        u3.port = 8000
        u3.path = '/a'
        u3.params = 'b'
        u3.update_query(foo='bar', bar='baz')
        u3.fragment = 'spam'
        self.assertEqual(u, u3)
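
The equality this test exercises ignores query-parameter order; here is a minimal stdlib illustration of such a comparison (an assumption about UrlParser.__eq__, not its actual code):

from urllib.parse import urlsplit, parse_qsl

def same_url(a, b):
    # compare scheme/netloc/path/fragment directly, and the query as an
    # order-insensitive collection of key/value pairs
    pa, pb = urlsplit(a), urlsplit(b)
    return (pa._replace(query='') == pb._replace(query='') and
            sorted(parse_qsl(pa.query)) == sorted(parse_qsl(pb.query)))

assert same_url('http://example.com:8000/a;b?foo=bar&bar=baz#spam',
                'http://example.com:8000/a;b?bar=baz&foo=bar#spam')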
Example #23
def _oembed_post(thing, **embed_options):
    subreddit = thing.subreddit_slow
    if (not can_view_link_comments(thing)
            or subreddit.type in Subreddit.private_types):
        raise ForbiddenError(errors.POST_NOT_ACCESSIBLE)

    live = ''
    if embed_options.get('live'):
        time = datetime.now(g.tz).isoformat()
        live = 'data-card-created="{}"'.format(time)

    script = ''
    if not embed_options.get('omitscript', False):
        script = format_html(
            SCRIPT_TEMPLATE,
            embedly_script=EMBEDLY_SCRIPT,
        )

    link_url = UrlParser(thing.make_permalink_slow(force_domain=True))
    link_url.update_query(ref='share', ref_source='embed')

    author_name = ""
    if not thing._deleted:
        author = thing.author_slow
        if author._deleted:
            author_name = _("[account deleted]")
        else:
            author_name = author.name

    html = format_html(
        POST_EMBED_TEMPLATE,
        live_data_attr=live,
        link_url=link_url.unparse(),
        title=websafe(thing.title),
        subreddit_url=make_url_https(subreddit.path),
        subreddit_name=subreddit.name,
        script=script,
    )

    oembed_response = dict(
        _OEMBED_BASE,
        type="rich",
        title=thing.title,
        author_name=author_name,
        html=html,
    )

    return oembed_response
Example #24
    def GET_link_id_redirect(self, link):
        if not link:
            abort(404)
        elif not link.subreddit_slow.can_view(c.user):
            # don't disclose the subreddit/title of a post via the redirect url
            abort(403)
        else:
            redirect_url = link.make_permalink_slow(force_domain=True)

        query_params = dict(request.GET)
        if query_params:
            url = UrlParser(redirect_url)
            url.update_query(**query_params)
            redirect_url = url.unparse()

        return self.redirect(redirect_url, code=301)
Example #27
    def POST_bpoptions(self, all_langs, **prefs):
        u = UrlParser(c.site.path + "prefs")
        bpfilter_prefs(prefs, c.user)
        if c.errors.errors:
            for error in c.errors.errors:
                if error[1] == 'stylesheet_override':
                    u.update_query(error_style_override=error[0])
                else:
                    u.update_query(generic_error=error[0])
            return self.redirect(u.unparse())

        set_prefs(c.user, prefs)
        c.user._commit()
        u.update_query(done='true')
        return self.redirect(u.unparse())
Example #28
    def POST_options(self, all_langs, **prefs):
        if feature.is_enabled("autoexpand_media_previews"):
            validator = VOneOf('media_preview', ('on', 'off', 'subreddit'))
            value = request.params.get('media_preview')
            prefs["pref_media_preview"] = validator.run(value)

        u = UrlParser(c.site.path + "prefs")

        filter_prefs(prefs, c.user)
        if c.errors.errors:
            for error in c.errors.errors:
                if error[1] == 'stylesheet_override':
                    u.update_query(error_style_override=error[0])
                else:
                    u.update_query(generic_error=error[0])
            return self.redirect(u.unparse())

        set_prefs(c.user, prefs)
        c.user._commit()
        u.update_query(done='true')
        return self.redirect(u.unparse())
Example #30
def _update_redirect_uri(base_redirect_uri, params):
    parsed = UrlParser(base_redirect_uri)
    parsed.update_query(**params)
    return parsed.unparse()
Example #31
 def test_unicode_query_params(self):
     u = UrlParser(u'http://example.com/?page=unicode:(')
     u2 = UrlParser('http://example.com/')
     u2.update_query(page=u'unicode:(')
     self.assertEqual(u, u2)
Example #32
 def make_permalink(self, link, sr=None):
     permalink = UrlParser(link.make_permalink(sr) + self._id36)
     permalink.update_query(c=1)
     return permalink.unparse()
Example #33
def update_query(base_url, **kw):
    parsed = UrlParser(base_url)
    parsed.update_query(**kw)
    return parsed.unparse()
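
Hypothetical usage, assuming update_query merges new parameters over any existing ones:

url = update_query('http://example.com/?page=1', page=2, sort='new')
# -> 'http://example.com/?page=2&sort=new' (parameter order may vary)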
Example #34
 def test_integer_query_params(self):
     u = UrlParser('http://example.com/?page=1234')
     u2 = UrlParser('http://example.com/')
     u2.update_query(page=1234)
     self.assertEqual(u, u2)
Example #35
    def resize_image(self,
                     image,
                     width=None,
                     censor_nsfw=False,
                     max_ratio=None):
        url = UrlParser(image['url'])
        url.hostname = g.imgix_domain
        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)
        if censor_nsfw:
            # Do an initial blur to make sure we're getting rid of icky
            # details.
            #
            # http://www.imgix.com/docs/reference/stylize#param-blur
            url.update_query(blur=600)
            # And then add pixellation to help the image compress well.
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=32)
        if g.imgix_signing:
            url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
Example #36
def make_feedurl(user, path, ext="rss"):
    u = UrlParser(path)
    u.update_query(user=user.name, feed=make_feedhash(user, path))
    u.set_extension(ext)
    return u.unparse()
Example #39
    def resize_image(self, image, width=None, file_type=None, censor_nsfw=False,
                     max_ratio=None):
        url = UrlParser(image['url'])
        is_gif = url.path.endswith('.gif') and (file_type == 'mp4' or not file_type)

        if is_gif:
            url.hostname = g.imgix_gif_domain
        else:
            url.hostname = g.imgix_domain

        # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
        # will prevent insecure content warnings on HTTPS pages.
        url.scheme = 'https'

        # g.s3_media_direct affects how preview image urls are stored
        # True: http://{s3_media_domain}/mybucket/helloworld.jpg
        # False: http://mybucket/helloworld.jpg
        # If it's True, we'll need to strip the bucket out of the path
        if g.s3_media_direct:
            path_parts = url.path.split('/')
            path_parts.pop(1)
            url.path = '/'.join(path_parts)

        if max_ratio:
            url.update_query(fit='crop')
            # http://www.imgix.com/docs/reference/size#param-crop
            url.update_query(crop='faces,entropy')
            url.update_query(arh=max_ratio)

        if width:
            if width > image['width']:
                raise NotLargeEnough()
            # http://www.imgix.com/docs/reference/size#param-w
            url.update_query(w=width)

        if file_type and file_type in ('gif', 'jpg', 'png', 'mp4'):
            url.update_query(fm=file_type)

        # We need to disable fragmented mp4s for proper playback in Firefox
        if file_type == 'mp4':
            url.update_query(**{'mp4-fragmented': 'false'})

        if censor_nsfw:
            # Do an initial blur to make sure we're getting rid of icky
            # details.
            #
            # http://www.imgix.com/docs/reference/stylize#param-blur
            url.update_query(blur=600)
            # And then add pixellation to help the image compress well.
            #
            # http://www.imgix.com/docs/reference/stylize#param-px
            url.update_query(px=32)
        if g.imgix_signing:
            if is_gif:
                url = self._sign_url(url, g.secrets['imgix_gif_signing_token'])
            else:
                url = self._sign_url(url, g.secrets['imgix_signing_token'])
        return url.unparse()
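
A quick illustration of the bucket-stripping step above when g.s3_media_direct is true, using the example path from the comment in the code:

path_parts = '/mybucket/helloworld.jpg'.split('/')
path_parts.pop(1)  # drop the 'mybucket' bucket segment
assert '/'.join(path_parts) == '/helloworld.jpg'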