def get_full_path(self, **kwargs):
    query_string = self.get_query_string(**kwargs)
    if query_string:
        query_string = '?' + query_string
    fragment = self.fragment and '#' + iri_to_uri(self.fragment) or ''
    return iri_to_uri(self.path) + query_string + fragment
def urls(self):
    "Returns a list of (value, URL) tuples."
    # First, check the urls() method for each plugin.
    plugin_urls = []
    for plugin_name, plugin in self.model.model_databrowse().plugins.items():
        urls = plugin.urls(plugin_name, self)
        if urls is not None:
            #plugin_urls.append(urls)
            values = self.values()
            return zip(self.values(), urls)
    if self.field.rel:
        m = EasyModel(self.model.site, self.field.rel.to)
        if self.field.rel.to in self.model.model_list:
            lst = []
            for value in self.values():
                url = mark_safe('%s%s/%s/objects/%s/' % (
                    self.model.site.root_url,
                    m.model._meta.app_label,
                    m.model._meta.module_name,
                    iri_to_uri(value._get_pk_val())))
                lst.append((smart_unicode(value), url))
        else:
            lst = [(value, None) for value in self.values()]
    elif self.field.choices:
        lst = []
        for value in self.values():
            url = mark_safe('%s%s/%s/fields/%s/%s/' % (
                self.model.site.root_url,
                self.model.model._meta.app_label,
                self.model.model._meta.module_name,
                self.field.name,
                iri_to_uri(self.raw_value)))
            lst.append((value, url))
    elif isinstance(self.field, models.URLField):
        val = self.values()[0]
        lst = [(val, iri_to_uri(val))]
    else:
        lst = [(self.values()[0], None)]
    return lst
def item_extra_kwargs(self, item):
    kwargs = {
        'when': '%s %s ago' % (
            item.when_prefix(),
            simpletimesince(item.when()))
    }
    if item.website_url:
        kwargs['website_url'] = iri_to_uri(item.website_url)

    # _adjusted is set in self._bulk_adjusted_items.
    if not item._adjusted[THUMBNAIL_SIZES[0]]:
        kwargs['thumbnail_url'] = ''
    else:
        url = full_url(item._adjusted[THUMBNAIL_SIZES[0]]['url'])
        kwargs['thumbnail'] = iri_to_uri(url)

    if self.feed_type is JSONGenerator:
        # Version 2 of the MC widgets expects a 'thumbnails_resized'
        # argument which includes thumbnails of these sizes for the
        # various sizes of widget. These are only here for backwards
        # compatibility with those widgets.
        kwargs['thumbnails_resized'] = []
        for size in THUMBNAIL_SIZES[1:]:
            info_dict = item._adjusted.get(size, {})
            url = full_url(info_dict.get('url', ''))
            kwargs['thumbnails_resized'].append({'width': size[0],
                                                 'height': size[1],
                                                 'url': url})

    if item.embed_code:
        kwargs['embed_code'] = item.embed_code
    return kwargs
def get_cache_key(request, key_prefix=None):
    """
    Depending on the user and the request method, generate a cache key with
    respect to GET (query string) and POST (request data).
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if request.method in ('GET', 'HEAD'):
        method = 'GET'
        content = md5_constructor(request.GET.urlencode())
    elif request.method == 'POST':
        method = 'POST'
        content = md5_constructor(request.raw_post_data)
    else:
        method = ''
        content = md5_constructor()
    path = md5_constructor(iri_to_uri(request.path))
    if request.user.is_authenticated():
        user = iri_to_uri(request.user)
    else:
        user = ''
    # On the response the language was DE again although on the request it was
    # EN, so cache the request language as well. :-)
    if hasattr(request, '_cache_lang'):
        lang = request._cache_lang
    else:
        lang = get_language()
        request._cache_lang = lang
    return '%s.%s.%s.%s.%s.%s.%s' % (
        request.get_host(), key_prefix, lang, method, user,
        path.hexdigest(), content.hexdigest())
def pre_process(self, entry):
    '''
    A hook used to clean up feed entry data before it is processed.

    This hook can be used to clean up dates and/or media data before they
    are processed.
    '''
    date_published = entry.get('published', entry.get('updated'))
    if not date_published:
        date_published = str(datetime.datetime.utcnow())
    date_published = dateutil.parser.parse(date_published)
    # Change the date to UTC and remove timezone info since MySQL doesn't
    # support it.
    utcoffset = date_published.utcoffset()
    if utcoffset:
        date_published = date_published - utcoffset
    date_published = date_published.replace(tzinfo=None)
    entry['published'] = date_published
    if 'link' in entry:
        protocol_index = entry['link'].find("://")
        if protocol_index != -1:
            entry['link'] = (entry['link'][:protocol_index + 3] +
                             iri_to_uri(entry['link'][protocol_index + 3:]))
        else:
            entry['link'] = iri_to_uri(entry['link'])
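# A minimal, hypothetical sketch (not part of the importer above) of how the
# link handling in pre_process() behaves: only the part after the "://"
# separator is passed through iri_to_uri(), so the scheme survives untouched.
from django.utils.encoding import iri_to_uri

link = u'https://example.com/søk?q=café'  # hypothetical entry['link'] value
protocol_index = link.find("://")
if protocol_index != -1:
    link = link[:protocol_index + 3] + iri_to_uri(link[protocol_index + 3:])
else:
    link = iri_to_uri(link)
# link is now 'https://example.com/s%C3%B8k?q=caf%C3%A9'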
def process_request(self, request):
    path = iri_to_uri(request.get_full_path())
    end_index = path.find('?')
    if end_index != -1:
        path = path[:end_index]
    for pass_url in self._pass_url:
        if path == pass_url:
            return
    if request.user and not request.user.is_authenticated():
        auth_url = reverse("get_login_view")
        auth_client_url = reverse("get_login_client_view")
        next_url = iri_to_uri(request.get_full_path())
        if next_url != auth_url and next_url != auth_client_url:
            param = "?%s=%s" % (REDIRECT_FIELD_NAME, next_url)
            if next_url.find('clientinstances') > 0:
                redirect_to = "".join((auth_client_url, param))
            else:
                redirect_to = "".join((auth_url, param))
        elif next_url == auth_client_url:
            redirect_to = auth_client_url
        else:
            redirect_to = auth_url
        messages.error(request, _("Please log in to continue."))
        return shortcuts.redirect(redirect_to)
def processedUrlsToDatabase(ws_report, fname, num):
    def getRowItems(r):
        items = r.split(",")
        # If the line contains simple URLs there should be only 7 commas.
        if len(items) == num:
            return items
        # There are more than 7 commas, so the URLs are complicated and
        # probably have javascript code in them.
        items = getRowWithComplicatedURLs(r, num)
        if len(items) == num:
            return items
        return []

    info("Processed urls from: " + fname)
    try:
        with open(fname, "r") as csvfile:
            i = 1
            try:
                for row in csvfile:
                    debug(" ROW: " + row)
                    items = getRowItems(row)
                    if len(items) == num:
                        url1 = iri_to_uri(stripQuotes(items[1]))
                        url2 = iri_to_uri(stripQuotes(items[2]))
                        url3 = iri_to_uri(stripQuotes(items[3]))
                        # info(" URLS: " + items[0] + " url1: " + url1 + " url2: " + url2 + " url3: " + url3)
                        try:
                            p_url = ProcessedURL(
                                ws_report=ws_report,
                                url_requested=url1,
                                url_returned=url2,
                                url_referenced=url3
                            )
                            p_url.page_seq_num = int(items[0])
                            p_url.redirect = url1 != url2
                            p_url.http_status_code = int(items[4])
                            p_url.dom_time = int(items[5])
                            p_url.link_time = int(items[6])
                            p_url.event_time = int(items[7])
                            p_url.eval_time = int(items[8])
                            p_url.save_time = int(items[9])
                            p_url.total_time = (
                                int(items[5]) + int(items[6]) + int(items[7]) +
                                int(items[8]) + int(items[9])
                            )
                            p_url.save()
                            i += 1
                        except:
                            error("** Error creating ProcessedURL object for: " +
                                  items[0] + " " + url1 + " from " + url2)
                    else:
                        error("** Could not parse row into " + str(num) + " parts: " + row)
            except:
                error("** Error reading line: " + str(i) + " in " + fname)
    except IOError as e:
        error("******** Error: " + fname + " cannot be opened")
        error(" I/O error({0}): {1}".format(e.errno, e.strerror))
def __init__(self, title, link, description, language=None, author_email=None,
             author_name=None, author_link=None, subtitle=None, categories=None,
             feed_url=None, feed_copyright=None, feed_guid=None, ttl=None,
             **kwargs):
    to_unicode = lambda s: force_unicode(s, strings_only=True)
    if categories:
        categories = [force_unicode(c) for c in categories]
    if ttl is not None:
        # Force ints to unicode
        ttl = force_unicode(ttl)
    self.feed = {
        'title': to_unicode(title),
        'link': iri_to_uri(link),
        'description': to_unicode(description),
        'language': to_unicode(language),
        'author_email': to_unicode(author_email),
        'author_name': to_unicode(author_name),
        'author_link': iri_to_uri(author_link),
        'subtitle': to_unicode(subtitle),
        'categories': categories or (),
        'feed_url': iri_to_uri(feed_url),
        'feed_copyright': to_unicode(feed_copyright),
        'id': feed_guid or link,
        'ttl': ttl,
    }
    self.feed.update(kwargs)
    self.items = []
def send_sms(self, text, phone, sms_id):
    if is_cyrilic_exist(text):
        coding = u'&coding=2'
        # Re-encode the text (previously done via a temporary file).
        text = iri_to_uri(text)  # urllib.quote_plus(text)
    else:
        coding = u'&coding=0'
    #print text
    delivery_iri = u'http://127.0.0.1/gateway/smsstatus/?sms_id=%s&user=%s&password=%s&from=%%p&to=%%P&smsc=%%i&dlr-type=%%d&text=%%b&time=%%t&charset=%%C&coding=%%c&smssys_id=%%I' \
        % (sms_id, KANNEL_DELIVER_USER, KANNEL_DELIVER_PASSWORD)
    delivery_uri = iri_to_uri(delivery_iri)
    #self.stdout.write(u'delivery_iri %s\n' % delivery_uri)
    delivery_url = urllib.quote_plus(delivery_uri)
    #self.stdout.write(u'delivery_url %s\n\n' % delivery_url)
    url = u'http://%s:%d/cgi-bin/sendsms?username=%s&password=%s&from=%s&to=%s&text=%s%s&charset=UTF-8&dlr-mask=31&dlr-url=%s' \
        % (KANNEL_BEARERBOX_HOST, KANNEL_SENDSMS_PORT, KANNEL_USER,
           KANNEL_PASSWORD, SENDER, phone, text, coding, delivery_url)
    # mask = 1+2+4+8+16 = 31
    self.stdout.write(u'%s\n' % url)
    try:
        f = urllib.urlopen(url)
        print f.read()
        f.close()
    except IOError:
        self.stdout.write(u"Can't send: connection refused\n")
        return False
    self.stdout.write(u'Sent\n')
    return True
def add_item(self, title, link, description, author_email=None,
             author_name=None, author_link=None, pubdate=None, comments=None,
             unique_id=None, enclosure=None, categories=(), item_copyright=None,
             ttl=None, **kwargs):
    """
    Adds an item to the feed. All args are expected to be Python Unicode
    objects except pubdate, which is a datetime.datetime object, and
    enclosure, which is an instance of the Enclosure class.
    """
    to_unicode = lambda s: force_unicode(s, strings_only=True)
    if categories:
        categories = [to_unicode(c) for c in categories]
    if ttl is not None:
        # Force ints to unicode
        ttl = force_unicode(ttl)
    item = {
        'title': to_unicode(title),
        'link': iri_to_uri(link),
        'description': to_unicode(description),
        'author_email': to_unicode(author_email),
        'author_name': to_unicode(author_name),
        'author_link': iri_to_uri(author_link),
        'pubdate': pubdate,
        'comments': comments,
        'unique_id': to_unicode(unique_id),
        'enclosure': enclosure,
        'categories': categories or (),
        'item_copyright': to_unicode(item_copyright),
        'ttl': ttl,
    }
    item.update(kwargs)
    self.items.append(item)
def item_extra_kwargs(self, item):
    kwargs = {
        'when': '%s %s ago' % (
            item.when_prefix(),
            simpletimesince(item.when()))
    }
    if item.website_url:
        kwargs['website_url'] = iri_to_uri(item.website_url)
    if item.has_thumbnail:
        site = Site.objects.get_current()
        if item.thumbnail_url:
            kwargs['thumbnail'] = iri_to_uri(item.thumbnail_url)
        else:
            default_url = default_storage.url(
                item.get_resized_thumb_storage_path(375, 295))
            if not (default_url.startswith('http://') or
                    default_url.startswith('https://')):
                default_url = 'http://%s%s' % (site.domain, default_url)
            kwargs['thumbnail'] = default_url
        kwargs['thumbnails_resized'] = resized = {}
        for size in Video.THUMB_SIZES:
            url = default_storage.url(
                item.get_resized_thumb_storage_path(*size))
            if not (url.startswith('http://') or url.startswith('https://')):
                url = 'http://%s%s' % (site.domain, url)
            resized[size] = url
    if item.embed_code:
        kwargs['embed_code'] = item.embed_code
    return kwargs
def getCacheEntry(request, viewType, skipCentralRefresh=False, isData=False):
    isCache = cacheIsAvailable(request)
    if isCache:
        is_json = False
        # We do this check to always rebuild the cache for the page when it is
        # called from the crawler.
        if (('HTTP_X_FORWARDED_FOR' in request.META) and
                (request.META['HTTP_X_FORWARDED_FOR'] in notcachedRemoteAddress) and
                skipCentralRefresh == False):
            return None
        request._cache_update_cache = False
        if ((('HTTP_ACCEPT' in request.META) and
                (request.META.get('HTTP_ACCEPT') in ('application/json',))) or
                ('json' in request.GET)):
            is_json = True
        key_prefix = "%s_%s_%s_" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)
        if isData == False:
            try:
                if request.method == "POST":
                    path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(
                        request.get_full_path() + '?' + request.body)))
                else:
                    path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(
                        request.get_full_path())))
            except:
                path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(
                    request.get_full_path())))
            cache_key = '%s.%s' % (key_prefix, path.hexdigest())
            return cache.get(cache_key, None)
        else:
            if 'harvester' in request.META['PATH_INFO']:
                is_json = False
                key_prefix = "%s_%s_%s_" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)
                cache_key = '%s' % (key_prefix)
                return cache.get(cache_key, None)
    else:
        return None
def add_item(self, title, link, description, author_email=None,
             author_name=None, author_link=None, pubdate=None, comments=None,
             unique_id=None, unique_id_is_permalink=None, categories=(),
             item_copyright=None, ttl=None, updateddate=None, enclosures=None,
             **kwargs):
    """
    Add an item to the feed. All args are expected to be strings except
    pubdate and updateddate, which are datetime.datetime objects, and
    enclosures, which is an iterable of instances of the Enclosure class.
    """
    def to_str(s):
        return str(s) if s is not None else s
    categories = categories and [to_str(c) for c in categories]
    self.items.append({
        'title': to_str(title),
        'link': iri_to_uri(link),
        'description': to_str(description),
        'author_email': to_str(author_email),
        'author_name': to_str(author_name),
        'author_link': iri_to_uri(author_link),
        'pubdate': pubdate,
        'updateddate': updateddate,
        'comments': to_str(comments),
        'unique_id': to_str(unique_id),
        'unique_id_is_permalink': unique_id_is_permalink,
        'enclosures': enclosures or (),
        'categories': categories or (),
        'item_copyright': to_str(item_copyright),
        'ttl': to_str(ttl),
        **kwargs,
    })
def templatize_pattern_str(
        pattern_string,         # the URL RegEx
        kwargs_lookup,          # the URL kwargs dict:
                                #   the keys are the URL kwargs
                                #   the values are the field_names
        querystrings,           # querystring params that are mandatory
        defaults,               # default values when the kwarg lookup misses some field
        opt_querystrings=None   # optional querystring args, such as pagination
        ):
    opt_querystrings = opt_querystrings or {}
    ret_string = pattern_string
    regex = (r'(?:[?P\(<]*)(?P<name>[a-zA-Z0-9_]*)(?:[>\)\[\]\-\._\*]*)'
             '/(?P<all>\(?P\<(?P<name>[a-zA-Z0-9_]*)>\)[\[\]\-\._\*a-zA-Z0-9_?&%]*)')
    regex = r'\((.*?)\)'
    x = 0
    # find groups
    for match in re.findall(regex, pattern_string):
        x += 1
        regex2 = r'^\?P<(?P<name>[0-9a-zA-Z_]*)>'
        name = re.findall(regex2, match)[0]
        regex_local = r'\(\?P<%s>(.*?)\)' % name
        if name in kwargs_lookup:
            ret_string = re.sub(regex_local,
                                '{{{var}}}'.format(var=iri_to_uri(kwargs_lookup[name])),
                                ret_string, 1)
        elif name in defaults:
            ret_string = re.sub(regex_local,
                                '{var}'.format(var=iri_to_uri(defaults[name])),
                                ret_string, 1)
        else:
            raise Exception, match
    ret_string = re.sub(r'(^\^)', '', ret_string)
    ret_string = re.sub(r'(\$$)', '', ret_string)
    if querystrings:
        (scheme, netloc, path, query, fragment) = urlsplit(ret_string)
        query_dict = QueryDict(query).copy()
        for query_param, query_value in querystrings.iteritems():
            if query_value in kwargs_lookup:
                query_dict[query_param] = '{{{val}}}'.format(
                    val=iri_to_uri(kwargs_lookup[query_value]))
            elif query_value in defaults:
                query_dict[query_param] = '{val}'.format(
                    val=iri_to_uri(defaults[query_value]))
            else:
                raise KeyError, query_value
        template_string = ''
        templatized_qs = []
        for query_param, query_value in opt_querystrings.iteritems():
            if query_value:
                query_dict[query_param] = '{val}'.format(val=iri_to_uri(query_value))
            else:
                templatized_qs.append(query_param)
        query = query_dict.urlencode(safe='{}')
        if templatized_qs:
            template_expression = '&' if query else '?'
            template_string = '{{{expression}{query_args}}}'.format(
                expression=template_expression,
                query_args=','.join(iri_to_uri(qs) for qs in templatized_qs))
        ret_string = urlunsplit((scheme, netloc, path, query + template_string, fragment))
    return ret_string
def test_iri_to_uri(self):
    self.assertEqual(iri_to_uri(u"red%09ros\xe9#red"), "red%09ros%C3%A9#red")
    self.assertEqual(iri_to_uri(u"/blog/for/J\xfcrgen M\xfcnster/"),
                     "/blog/for/J%C3%BCrgen%20M%C3%BCnster/")
    self.assertEqual(
        iri_to_uri(u"locations/%s" % urlquote_plus(u"Paris & Orl\xe9ans")),
        "locations/Paris+%26+Orl%C3%A9ans"
    )
def test_iri_to_uri(self):
    self.assertEqual(iri_to_uri(u'red%09ros\xe9#red'), 'red%09ros%C3%A9#red')
    self.assertEqual(iri_to_uri(u'/blog/for/J\xfcrgen M\xfcnster/'),
                     '/blog/for/J%C3%BCrgen%20M%C3%BCnster/')
    self.assertEqual(iri_to_uri(u'locations/%s' % urlquote_plus(u'Paris & Orl\xe9ans')),
                     'locations/Paris+%26+Orl%C3%A9ans')
def delete(self, *args, **kwargs):
    super(PermissionSet, self).delete(*args, **kwargs)
    # FIXME: can we use `post_delete` signals or invalidate caches in
    # model managers, please?
    username = self.profile.user.username
    keys = [
        iri_to_uri('Permissions:%s' % username),
        iri_to_uri('projects:accessible:%s' % username),
    ]
    cache.delete_many(keys)
def utility(request):
    """Add some useful niceties to the context."""
    base_uri = "%s://%s" % (request.is_secure() and 'https' or 'http',
                            request.get_host())
    return dict(
        site_base_uri=iri_to_uri(base_uri),
        absolute_uri=iri_to_uri(urljoin(base_uri, request.get_full_path())))
def test_redirect_sharing(self):
    addon = Addon.objects.get(id=3615)
    r = self.client.get(reverse('addons.share', args=['a3615']),
                        {'service': 'delicious'})
    url = absolutify(unicode(addon.get_url_path()))
    summary = truncate(addon.summary, length=250)
    eq_(r.status_code, 302)
    assert iri_to_uri(addon.name) in r['Location']
    assert iri_to_uri(url) in r['Location']
    assert iri_to_uri(summary) in r['Location']
def switch_accounts(request):
    """A view which allows a user to change which of their Google accounts
    they're logged in with.

    The URL for the user to be sent to afterwards should be provided in
    request.GET['next'].

    See https://p.ota.to/blog/2014/2/google-multiple-sign-in-on-app-engine/

    For the account switching, the user needs to go first to Google's login
    page. If he/she gets back with the same user, we send them to the logout
    URL and *then* the login page.

    Scenario:
        1. User clicks a 'switch accounts' link which takes them to this view.
        2. We redirect them to the Google login screen where - if they are
           logged into multiple accounts - they get the opportunity to switch
           account.
        3. Two things may happen:
            a. They aren't logged into multiple accounts, so Google redirects
               them straight back to us. As we want them to switch account, we
               send them back to Google's logout URL with the `continue` url
               set to the Google login page.
               => They log into another account.
               i. They then return to here, where we clear their session and
                  send them on their way.
            b. They actually switched account, and so they come back with a
               different account and we redirect them to the original
               destination set when first visiting this view.

    See the steps in the code, referring to the steps of the scenario.
    """
    destination = request.GET.get('next', '/')
    current_google_user = users.get_current_user()
    # Just making sure we don't save readable info in the session as we can't
    # be sure this session will be terminated after logout. This is possibly
    # paranoia.
    user_hash = hashlib.sha1(current_google_user.user_id()).hexdigest()
    previous_user_hash = request.session.get('previous_user')
    previous_user_already_redirected = request.session.get(
        'previous_user_already_redirected', False)

    if previous_user_hash:
        if user_hash == previous_user_hash and not previous_user_already_redirected:
            # Step 3.a.
            django_logout(request)  # Make sure old Django user session gets flushed.
            request.session['previous_user'] = user_hash  # but add the previous_user hash back in
            request.session['previous_user_already_redirected'] = True
            # We want to create a URL to the logout URL which then goes to the
            # login URL which then goes back to *this* view, which then goes
            # to the final destination.
            login_url = iri_to_uri(users.create_login_url(request.get_full_path()))
            logout_url = users.create_logout_url(login_url)
            return HttpResponseRedirect(logout_url)
        else:
            # Step 3.b, or step 3.a.i.
            del request.session['previous_user']
            if 'previous_user_already_redirected' in request.session:
                del request.session['previous_user_already_redirected']
            return HttpResponseRedirect(destination)
    else:
        # Step 2:
        switch_account_url = iri_to_uri(request.get_full_path())
        redirect_url = users.create_login_url(switch_account_url)
        django_logout(request)  # Make sure old Django user session gets flushed.
        request.session['previous_user'] = user_hash
        return HttpResponseRedirect(redirect_url)
def get_full_path(self, **kwargs):
    query_string = self.get_query_string(**kwargs)
    if query_string:
        query_string = '?%s' % query_string
    fragment = self.fragment and '#%s' % iri_to_uri(self.fragment) or ''
    return '%s%s%s' % (
        iri_to_uri(self.get_path()),
        query_string,
        fragment
    )
def _set_wordcount_stats_cache(self, stats, key):
    if key:
        logging.info('Set wordcount stats for %s' % key)
        cache.set(iri_to_uri(key + ':get_total_wordcount'),
                  stats['total'], None)
        cache.set(iri_to_uri(key + ':get_fuzzy_wordcount'),
                  stats[FUZZY], None)
        cache.set(iri_to_uri(key + ':get_translated_wordcount'),
                  stats[TRANSLATED], None)
        del self.cache_values[key]['get_total_wordcount']
        del self.cache_values[key]['get_fuzzy_wordcount']
        del self.cache_values[key]['get_translated_wordcount']
def unprocessedUrlsToDatabase(ws_report, fname, num):
    def getRowItems(r):
        items = r.split(",")
        # If the row is clean of extra commas then there should be n items.
        if len(items) == num:
            return items
        # There are more than n-1 commas, so the URLs are complicated and
        # probably have javascript code in them.
        items = getRowWithComplicatedURLs(r, num)
        if len(items) == num:
            return items
        return []

    info("Unprocessed urls from: " + fname)
    try:
        with open(fname, "r") as csvfile:
            i = 1
            try:
                for row in csvfile:
                    debug(" ROW: " + row)
                    items = getRowItems(row)
                    if len(items) == num:
                        url1 = iri_to_uri(stripQuotes(items[0]))
                        url2 = iri_to_uri(stripQuotes(items[1]))
                        i += 1
                        try:
                            up_url = UnprocessedURL(ws_report=ws_report,
                                                    url=url1,
                                                    url_referenced=url2)
                            up_url.dom_time = int(items[2])
                            up_url.link_time = int(items[3])
                            up_url.event_time = int(items[4])
                            up_url.eval_time = int(items[5])
                            up_url.save_time = int(items[6])
                            up_url.total_time = (
                                int(items[2]) + int(items[3]) + int(items[4]) +
                                int(items[5]) + int(items[6])
                            )
                            up_url.save()
                        except:
                            error("** Error creating UnprocessedURL object for: " +
                                  url1 + " from " + url2)
                    else:
                        error("** Could not parse row into " + str(num) + " parts: " + row)
            except:
                error("** Error reading csv formatted file line: " + str(i) + " in " + fname)
    except IOError as e:
        error("******** Error: " + fname + " cannot be opened")
        error(" I/O error({0}): {1}".format(e.errno, e.strerror))
def render(self, context):
    try:
        req = context['request']
    except:
        return ''  # No request found in the context: no crumbs...
    if not hasattr(req, 'session'):
        return  # No session found in the context: no crumbs...

    # Pick up the current crumbs from the session cookie
    try:
        cur = req.session['crumbs']
        try:
            cur = cur[req.prefix]
        except:
            cur = [(unicode(_('Home')), HOME_CRUMB % (req.prefix, _('Home')),
                    '%s/admin/' % req.prefix)]
    except:
        req.session['crumbs'] = {}
        cur = [(unicode(_('Home')), HOME_CRUMB % (req.prefix, _('Home')),
                '%s/admin/' % req.prefix)]

    # Compute the new crumb node
    try:
        title = variable_title.resolve(context)
    except:
        title = req.get_full_path()
    # A special case to work around the hardcoded title of the main admin page
    if title == _('Site administration'):
        title = _('Home')
    node = (
        unicode(title),
        '<a href="%s%s%s">%s</a>' % (
            req.prefix,
            urlquote(req.path),
            req.GET and ('?' + iri_to_uri(req.GET.urlencode())) or '',
            unicode(escape(title))
        ),
        '%s%s%s' % (
            req.prefix,
            urlquote(req.path),
            req.GET and ('?' + iri_to_uri(req.GET.urlencode())) or '',
        ),
    )

    # Pop from the stack if the same title is already in the crumbs.
    cnt = 0
    for i in cur:
        if i[0] == node[0]:
            cur = cur[0:cnt]  # Pop all remaining elements from the stack
            break
        cnt += 1

    # Keep only a limited number of links in the history.
    # We delete the second element to keep "home" at the head of the list.
    while len(cur) > NUMBER_OF_CRUMBS:
        del cur[1]

    # Push current URL on the stack
    cur.append(node)

    # Update the current session
    req.session['crumbs'][req.prefix] = cur

    # Now create HTML code to return
    return ' > '.join([i[1] for i in cur])
def item_extra_kwargs(self, item):
    kwargs = {
        'when': '%s %s ago' % (
            item.when_prefix(),
            simpletimesince(item.when()))
    }
    if item.website_url:
        kwargs['website_url'] = iri_to_uri(item.website_url)
    if item.has_thumbnail:
        site = Site.objects.get_current()
        image = None
        if item.thumbnail_url:
            thumbnail_url = iri_to_uri(item.thumbnail_url)
        else:
            try:
                image = Image.objects.for_storage_path(item.thumbnail_path)
            except Image.DoesNotExist:
                thumbnail_url = ''
            else:
                adjusted = AdjustedImage.objects.adjust(image, 375, 295)
                thumbnail_url = adjusted.adjusted.url
        if not (thumbnail_url.startswith('http://') or
                thumbnail_url.startswith('https://')):
            thumbnail_url = 'http://%s%s' % (site.domain, thumbnail_url)
        kwargs['thumbnail'] = thumbnail_url

        if thumbnail_url and self.feed_type is JSONGenerator:
            # Version 2 of the MC widgets expect a 'thumbnails_resized'
            # argument which includes thumbnails of these sizes for the
            # various sizes of widget. These are only here for backwards
            # compatibility with those widgets.
            thumbnails_resized = kwargs['thumbnails_resized'] = []
            if image is None:
                image = Image.objects.for_storage_path(item.thumbnail_path)
            for size in ((222, 169),  # large widget
                         (140, 110),  # medium widget
                         (88, 68)):   # small widget
                adjusted = AdjustedImage.objects.adjust(image, *size)
                thumbnail_url = adjusted.adjusted.url
                if not (thumbnail_url.startswith('http://') or
                        thumbnail_url.startswith('https://')):
                    thumbnail_url = 'http://%s%s' % (site.domain, thumbnail_url)
                thumbnails_resized.append({'width': size[0],
                                           'height': size[1],
                                           'url': thumbnail_url})
    if item.embed_code:
        kwargs['embed_code'] = item.embed_code
    return kwargs
def add_item(
    self, title, link, description, author_email=None, author_name=None,
    author_link=None, pubdate=None, comments=None, unique_id=None,
    unique_id_is_permalink=None, enclosure=None, categories=(),
    item_copyright=None, ttl=None, updateddate=None, **kwargs
):
    """
    Adds an item to the feed. All args are expected to be Python Unicode
    objects except pubdate and updateddate, which are datetime.datetime
    objects, and enclosure, which is an instance of the Enclosure class.
    """
    to_unicode = lambda s: force_text(s, strings_only=True)
    if categories:
        categories = [to_unicode(c) for c in categories]
    if ttl is not None:
        # Force ints to unicode
        ttl = force_text(ttl)
    item = {
        "title": to_unicode(title),
        "link": iri_to_uri(link),
        "description": to_unicode(description),
        "author_email": to_unicode(author_email),
        "author_name": to_unicode(author_name),
        "author_link": iri_to_uri(author_link),
        "pubdate": pubdate,
        "updateddate": updateddate,
        "comments": to_unicode(comments),
        "unique_id": to_unicode(unique_id),
        "unique_id_is_permalink": unique_id_is_permalink,
        "enclosure": enclosure,
        "categories": categories or (),
        "item_copyright": to_unicode(item_copyright),
        "ttl": ttl,
    }
    item.update(kwargs)
    self.items.append(item)
def __init__(self, metadata, instances, path, site=None, language=None):
    self.__metadata = metadata
    if metadata._meta.use_cache:
        if metadata._meta.use_sites and site:
            hexpath = md5_constructor(iri_to_uri(site.domain + path)).hexdigest()
        else:
            hexpath = md5_constructor(iri_to_uri(path)).hexdigest()
        if metadata._meta.use_i18n:
            self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (
                self.__metadata.__class__.__name__, hexpath, language)
        else:
            self.__cache_prefix = 'rollyourown.seo.%s.%s' % (
                self.__metadata.__class__.__name__, hexpath)
    else:
        self.__cache_prefix = None
    self.__instances_original = instances
    self.__instances_cache = []
def test_view_language_team_new_member(client, language0, request_users,
                                       member, member2):
    user = request_users["user"]
    team = language_team.get(language0.__class__)(language0)
    admin_url = reverse(
        'pootle-language-admin-team-new-members',
        kwargs=dict(language_code=language0.code))
    client.login(
        username=user.username,
        password=request_users["password"])
    if not user.is_superuser:
        response = client.post(admin_url)
        assert response.status_code == 403
    else:
        with pytest.raises(Http400):
            client.post(admin_url)
    response = client.post(admin_url, data=dict(q="DOES NOT EXIST"))
    if not user.is_superuser:
        if user.is_anonymous:
            assert response.status_code == 402
            return
        assert response.status_code == 403
        team.add_member(user, "admin")
        response = client.post(admin_url, data=dict(q="DOES NOT EXIST"))
    assert json.loads(response.content)["items"]["results"] == []
    search_member = (
        member
        if user == member2
        else member2)
    response = client.post(
        admin_url, data=dict(q=search_member.username[:2]))
    result = json.loads(response.content)
    assert search_member.username in [
        r["text"] for r in result["items"]["results"]]
    team = language_team.get(language0.__class__)(language0)
    team.add_member(search_member, "member")
    response = client.post(
        admin_url, data=dict(q=search_member.username[:2]))
    result = json.loads(response.content)
    assert (
        search_member.username
        not in [r["text"] for r in result["items"]["results"]])
    if user in team.admins:
        team.remove_member(user)
    from django.core.cache import cache
    from django.utils.encoding import iri_to_uri
    key = iri_to_uri('Permissions:%s' % user.username)
    key = iri_to_uri('Permissions:%s' % search_member.username)
    cache.delete(key)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None):
    args = args or []
    kwargs = kwargs or {}
    if prefix is None:
        prefix = get_script_prefix()
    return iri_to_uri(u'%s%s' % (
        prefix, get_resolver(urlconf).reverse(viewname, *args, **kwargs)))
def add_query_param(request, key, val):
    """
    Add a query parameter to the current request url, and return the new url.
    """
    iri = request.get_full_path()
    uri = iri_to_uri(iri)
    return escape(replace_query_param(uri, key, val))
def get_identifier_url(self):
    url = self.ACCOUNT_TYPES[self.type]['url'].format(
        identifier=urlquote(self.identifier))
    return iri_to_uri(url)
def download_subtitles(request, format):
    video_id = request.GET.get('video_id')
    lang_id = request.GET.get('lang_pk')
    revision = request.GET.get('revision', None)

    if not video_id:
        # If video_id is None, Video.objects.get raises an exception; better
        # to show a 404 because video_id is required.
        raise Http404

    video = get_object_or_404(models.Video, video_id=video_id)

    if not lang_id:
        # If no language is passed, assume it's the original one.
        language = video.subtitle_language()
        if language is None:
            raise Http404
    else:
        try:
            language = video.newsubtitlelanguage_set.get(pk=lang_id)
        except ObjectDoesNotExist:
            raise Http404

    team_video = video.get_team_video()

    if not team_video:
        # Non-team videos don't require moderation
        version = language and language.version(public_only=False,
                                                version_number=revision)
    else:
        # Members can see all versions
        member = get_member(request.user, team_video.team)
        if member:
            version = language and language.version(public_only=False,
                                                    version_number=revision)
        else:
            version = language and language.version(version_number=revision)

    if not version:
        raise Http404
    if format not in babelsubs.get_available_formats():
        return HttpResponseServerError("Format not found")

    subs_text = babelsubs.to(version.get_subtitles(), format,
                             language=version.language_code)
    # Since this is a download, we can afford not to escape tags, especially
    # since a speaker change is denoted by '>>' and that would get entirely
    # stripped out.
    response = HttpResponse(subs_text, mimetype="text/plain")
    original_filename = '%s.%s' % (video.lang_filename(language.language_code),
                                   format)

    if 'HTTP_USER_AGENT' not in request.META or u'WebKit' in request.META['HTTP_USER_AGENT']:
        # Safari 3.0 and Chrome 2.0 accept a UTF-8 encoded string directly.
        filename_header = 'filename=%s' % original_filename.encode('utf-8')
    elif u'MSIE' in request.META['HTTP_USER_AGENT']:
        try:
            original_filename.encode('ascii')
        except UnicodeEncodeError:
            original_filename = 'subtitles.' + format
        filename_header = 'filename=%s' % original_filename
    else:
        # For others like Firefox, we follow RFC 2231 (encoding extension in
        # HTTP headers).
        filename_header = 'filename*=UTF-8\'\'%s' % iri_to_uri(
            original_filename.encode('utf-8'))

    response['Content-Disposition'] = 'attachment; ' + filename_header
    return response
def avatarchoose(request):
    """Avatar choose"""
    profile, created = Profile.objects.get_or_create(user=request.user)
    images = dict()
    if hasattr(settings, "AVATAR_QUOTA"):
        request.upload_handlers.insert(0, QuotaUploadHandler())
    if request.method == "POST":
        form = AvatarForm()
        if request.POST.get('keyword'):
            keyword = iri_to_uri(request.POST.get('keyword'))
            gd_client = gdata.photos.service.PhotosService()
            feed = gd_client.SearchCommunityPhotos(
                query="%s&thumbsize=72c" % keyword.split(" ")[0], limit='48')
            for entry in feed.entry:
                images[entry.media.thumbnail[0].url] = entry.content.src
        else:
            form = AvatarForm(request.POST, request.FILES)
            if form.is_valid():
                image = form.cleaned_data.get('url') or form.cleaned_data.get('photo')
                try:
                    thumb = Image.open(ContentFile(image.read()))
                except:
                    messages.error(request,
                                   _("This image can't be used as an avatar"))
                else:
                    thumb.thumbnail((480, 480), Image.ANTIALIAS)
                    f = StringIO()
                    save_img_params = SAVE_IMG_PARAMS.get(thumb.format, {})
                    try:
                        thumb.save(f, thumb.format, **save_img_params)
                    except:
                        thumb.save(f, thumb.format)
                    f.seek(0)
                    avatar = Avatar(user=request.user, image="", valid=False)
                    file_ext = image.content_type.split("/")[1]  # "image/gif" => "gif"
                    if file_ext == 'pjpeg':
                        file_ext = 'jpeg'
                    avatar.image.save(
                        "%s.%s" % (request.user.username, file_ext),
                        ContentFile(f.read()))
                    avatar.save()
                    signal_responses = signals.post_signal.send(
                        sender=avatarchoose, request=request, form=form)
                    return signals.last_response(signal_responses) or \
                        HttpResponseRedirect(reverse("profile_avatar_crop"))
    else:
        form = AvatarForm()

    if DEFAULT_AVATAR:
        base, filename = os.path.split(DEFAULT_AVATAR)
        filename, extension = os.path.splitext(filename)
        generic = "%s/%s.%d%s" % (base, filename, DEFAULT_AVATAR_SIZE, extension)
        generic = generic.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
    else:
        generic = ""

    template = "member/avatar/choose.html"
    data = {
        'generic': generic,
        'form': form,
        "images": images,
        'AVATAR_WEBSEARCH': AVATAR_WEBSEARCH,
        'section': 'avatar',
        'DEFAULT_AVATAR_SIZE': DEFAULT_AVATAR_SIZE,
        'MIN_AVATAR_SIZE': MIN_AVATAR_SIZE,
    }
    signals.context_signal.send(sender=avatarchoose, request=request, context=data)
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
def __init__(self, redirect_to):
    super(HttpResponseSeeOther, self).__init__()
    self["Location"] = iri_to_uri(redirect_to)
def url(self):
    return mark_safe('%s%s/%s/objects/%s/' % (
        self.model.site.root_url,
        self.model.model._meta.app_label,
        self.model.model._meta.model_name,
        iri_to_uri(self.pk())))
def get_absolute_url(self):
    # Handle the script prefix manually because we bypass reverse()
    return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
def delete(self, *args, **kwargs):
    super(PermissionSet, self).delete(*args, **kwargs)
    # FIXME: can we use `post_delete` signals or invalidate caches in model
    # managers, please?
    key = iri_to_uri('Permissions:%s' % self.user.username)
    cache.delete(key)
def get_absolute_url(self):
    return iri_to_uri(get_script_prefix().rstrip("/") + self.url)
def __init__(self, url, length, mime_type):
    "All args are expected to be Python Unicode objects"
    self.length, self.mime_type = length, mime_type
    self.url = iri_to_uri(url)
def __init__(self, url, length, mime_type):
    "All args are expected to be strings"
    self.length, self.mime_type = length, mime_type
    self.url = iri_to_uri(url)
def get_absolute_url(self):
    # Build the correct URL.
    return iri_to_uri(get_script_prefix().rstrip('/') + self.slug)
def fetch(self, url_or_implementation, headers=None, method='get',
          params=None, data=None, expect_json=True, dont_cache=False,
          refresh_cache=False, retries=None, retry_sleeptime=None):
    url = implementation = None
    if isinstance(url_or_implementation, basestring):
        url = url_or_implementation

        if retries is None:
            retries = settings.MIDDLEWARE_RETRIES
        if retry_sleeptime is None:
            retry_sleeptime = settings.MIDDLEWARE_RETRY_SLEEPTIME

        if url.startswith('/'):
            url = self._complete_url(url)

        if not headers:
            if self.http_host:
                headers = {'Host': self.http_host}
            else:
                headers = {}

        if self.username and self.password:
            auth = self.username, self.password
        else:
            auth = ()
    else:
        implementation = url_or_implementation

    cache_key = None
    cache_file = None

    if settings.CACHE_MIDDLEWARE and not dont_cache and self.cache_seconds:
        if url:
            # Prepare a fake Request object to use it to get the full URL that
            # will be used. Needed for caching.
            req = requests.Request(
                method=method.upper(),
                url=url,
                auth=auth,
                headers=headers,
                data=data,
                params=params,
            ).prepare()
            cache_key = hashlib.md5(iri_to_uri(req.url)).hexdigest()
        else:
            name = implementation.__class__.__name__
            cache_key = hashlib.md5(name + unicode(params)).hexdigest()

        if not refresh_cache:
            result = cache.get(cache_key)
            if result is not None:
                if url:
                    logger.debug("CACHE HIT %s" % url)
                else:
                    logger.debug("CACHE HIT %s" % implementation.__class__.__name__)
                return result, True

            # not in the memcache/locmem but is it in cache files?
            if settings.CACHE_MIDDLEWARE_FILES:
                root = settings.CACHE_MIDDLEWARE_FILES
                if isinstance(root, bool):
                    cache_file = os.path.join(settings.ROOT, 'models-cache')
                else:
                    cache_file = root
                # We need to conjure up a string filename to represent this
                # call. If it's a URL we use the URL path as the file path.
                # If it's an implementation we use the class name.
                if implementation:
                    cache_file = os.path.join(cache_file,
                                              implementation.__class__.__name__)
                else:
                    split = urlparse.urlparse(url)
                    cache_file = os.path.join(cache_file,
                                              split.netloc,
                                              _clean_path(split.path))
                    if split.query:
                        cache_file = os.path.join(cache_file,
                                                  _clean_query(split.query))
                if expect_json:
                    cache_file = os.path.join(cache_file,
                                              '%s.json' % cache_key)
                else:
                    cache_file = os.path.join(cache_file,
                                              '%s.dump' % cache_key)

                if os.path.isfile(cache_file):
                    # but is it fresh enough?
                    age = time.time() - os.stat(cache_file)[stat.ST_MTIME]
                    if age > self.cache_seconds:
                        logger.debug("CACHE FILE TOO OLD")
                        os.remove(cache_file)
                    else:
                        logger.debug("CACHE FILE HIT %s" % url)
                        delete_cache_file = False
                        with open(cache_file) as f:
                            if expect_json:
                                try:
                                    return json.load(f), True
                                except ValueError:
                                    logger.warn(
                                        "%s is not a valid JSON file and "
                                        "will be deleted" % (cache_file,),
                                        exc_info=True
                                    )
                                    delete_cache_file = True
                            else:
                                return f.read(), True
                        if delete_cache_file:
                            os.remove(cache_file)

    if url:
        if method == 'post':
            request_method = requests.post
            logger.info("POSTING TO %s" % url)
        elif method == 'get':
            request_method = requests.get
            logger.info("FETCHING %s" % url)
        elif method == 'put':
            request_method = requests.put
            logger.info("PUTTING TO %s" % url)
        elif method == 'delete':
            request_method = requests.delete
            logger.info("DELETING ON %s" % url)
        else:
            raise ValueError(method)

        try:
            resp = request_method(
                url=url,
                auth=auth,
                headers=headers,
                data=data,
                params=params,
            )
        except requests.ConnectionError:
            if not retries:
                raise
            # https://bugzilla.mozilla.org/show_bug.cgi?id=916886
            time.sleep(retry_sleeptime)
            return self.fetch(
                url,
                headers=headers,
                method=method,
                data=data,
                params=params,
                expect_json=expect_json,
                dont_cache=dont_cache,
                retry_sleeptime=retry_sleeptime,
                retries=retries - 1
            )

        if resp.status_code >= 400 and resp.status_code < 500:
            raise BadStatusCodeError(resp.status_code, resp.content)
        elif not resp.status_code == 200:
            raise BadStatusCodeError(
                resp.status_code,
                '%s (%s)' % (resp.content, url)
            )

        result = resp.content
        if expect_json:
            result = ujson.loads(result)
    else:
        # e.g. the .get() method on that class instance
        implementation_method = getattr(implementation, method)
        result = implementation_method(**params)

    if cache_key:
        cache.set(cache_key, result, self.cache_seconds)
        if cache_file:
            if not os.path.isdir(os.path.dirname(cache_file)):
                os.makedirs(os.path.dirname(cache_file))
            if expect_json:
                json.dump(result, open(cache_file, 'w'), indent=2)
            else:
                open(cache_file, 'w').write(result)

    return result, False
def get_absolute_url(self):
    iri = '/post/%i/%s/' % (self.id, urlquote(self.slug))
    return iri_to_uri(iri)
def _generate_cache_header_key(key_prefix, request):
    """Return a cache key for the header cache."""
    url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
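# An illustrative sketch (hypothetical URI, empty key prefix) of how the helper
# above derives the MD5 portion of the header cache key; the real key is then
# passed through _i18n_cache_key_suffix(), which may append language/timezone
# information depending on settings.
import hashlib
from django.utils.encoding import iri_to_uri

absolute_uri = u'https://example.com/søk/?q=café'  # hypothetical request URI
url = hashlib.md5(iri_to_uri(absolute_uri).encode('ascii'))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % ('', url.hexdigest())
# cache_key ends with the 32-character hexdigest of the ASCII-encoded URI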
def __init__(self, redirect_to):
    HttpResponse.__init__(self)
    self['Location'] = iri_to_uri(redirect_to)
def _generate_cache_header_key(key_prefix, request):
    """Returns a cache key for the header cache."""
    path = md5_constructor(iri_to_uri(request.path))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, path.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def __init__(self, redirect_to, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self['Location'] = iri_to_uri(redirect_to)
    parsed = urlparse(str(redirect_to))
    if parsed.scheme and parsed.scheme not in self.allowed_schemes:
        raise DisallowedRedirect(
            "Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
def build_absolute_uri(location: str) -> Optional[str]:
    host = Site.objects.get_current().domain
    protocol = "https" if settings.ENABLE_SSL else "http"
    current_uri = "%s://%s" % (protocol, host)
    location = urljoin(current_uri, location)
    return iri_to_uri(location)
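# A hedged usage sketch for build_absolute_uri() above, assuming ENABLE_SSL is
# True and the current Site domain is "example.com" (both hypothetical):
# urljoin() anchors the location on the site root, then iri_to_uri()
# percent-encodes any non-ASCII characters.
from urllib.parse import urljoin
from django.utils.encoding import iri_to_uri

current_uri = "https://example.com"
location = urljoin(current_uri, "/média/föö.png")
print(iri_to_uri(location))  # https://example.com/m%C3%A9dia/f%C3%B6%C3%B6.png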
def createRoom(request):
    return HttpResponseRedirect(
        "./room/" + urllib.quote(iri_to_uri(request.POST['roomName']), "") + "/")
def accessible_by_user(cls, user):
    """Returns a list of project codes accessible by `user`.

    Checks for explicit `view` permissions for `user`, and extends them with
    the `default` (if logged-in) and `nobody` users' `view` permissions.

    Negative `hide` permissions are also taken into account and they'll
    forbid project access as far as there's no `view` permission set at the
    same level for the same user.

    :param user: The ``User`` instance to get accessible projects for.
    """
    if user.is_superuser:
        key = iri_to_uri('projects:all')
    else:
        username = user.username
        key = iri_to_uri('projects:accessible:%s' % username)

    user_projects = cache.get(key, None)
    if user_projects is not None:
        return user_projects

    logging.debug(u'Cache miss for %s', key)

    # FIXME: use `cls.objects.cached_dict().keys()`
    ALL_PROJECTS = cls.objects.values_list('code', flat=True)

    if user.is_superuser:
        user_projects = ALL_PROJECTS
    else:
        ALL_PROJECTS = set(ALL_PROJECTS)

        if user.is_anonymous():
            allow_usernames = [username]
            forbid_usernames = [username, 'default']
        else:
            allow_usernames = list(set([username, 'default', 'nobody']))
            forbid_usernames = list(set([username, 'default']))

        # Check root for `view` permissions
        root_permissions = PermissionSet.objects.filter(
            directory__pootle_path='/',
            user__username__in=allow_usernames,
            positive_permissions__codename='view',
        )
        if root_permissions.count():
            user_projects = ALL_PROJECTS
        else:
            user_projects = set()

        # Check specific permissions at the project level
        accessible_projects = cls.objects.filter(
            directory__permission_sets__positive_permissions__codename='view',
            directory__permission_sets__user__username__in=allow_usernames,
        ).values_list('code', flat=True)

        forbidden_projects = cls.objects.filter(
            directory__permission_sets__negative_permissions__codename='hide',
            directory__permission_sets__user__username__in=forbid_usernames,
        ).values_list('code', flat=True)

        allow_projects = set(accessible_projects)
        forbid_projects = set(forbidden_projects) - allow_projects
        user_projects = (user_projects.union(
            allow_projects)).difference(forbid_projects)

    user_projects = list(user_projects)
    cache.set(key, user_projects, settings.POOTLE_CACHE_TIMEOUT)

    return user_projects
def url(self):
    return mark_safe('%s%s/%s/%s/%s/' % (
        self.model.site.root_url,
        self.model.model._meta.app_label,
        self.model.model._meta.model_name,
        self.field.field.name,
        iri_to_uri(self.value)))
def iriencode(value):
    """Escapes an IRI value for use in a URL."""
    return force_text(iri_to_uri(value))
def make_cache_key(self, name):
    return iri_to_uri("%s:%s" % (self.cache_key, name))
def handle(self, *args, **options):
    if len(args) < 2:
        raise CommandError(
            "You must provide at least zenossbaseurl and zenossselector")
    zenossbaseurl = args[0]
    if not zenossbaseurl.endswith("/"):
        zenossbaseurl += "/"
    zenossselector = args[1]

    zenoss_client = makeHttps(zenossbaseurl, **options)

    # issue_tracker = self.ensure_issue_tracker(jiraname, jirabaseurl)
    # project = self.ensure_project(issue_tracker, projectname)

    logger.info("Getting list of nodes for %s", zenossselector)
    device_list_url = "%szport/dmd/%s/getSubDevices" % (zenossbaseurl,
                                                        zenossselector)
    resp, content = zenoss_client.request(iri_to_uri(device_list_url), "GET")
    expect_ok(resp, content)
    # expect_xml(resp, content)

    # ['BBCApplicationHostDevice at /zport/dmd/Devices/BBC/ApplicationHost/Platform/Delivery/Database/MySQL-Only/devices/db030.back.live.cwwtf.local>',
    #  ....
    #  'BBCApplicationHostDevice at /zport/dmd/Devices/BBC/ApplicationHost/Platform/Delivery/InterPlatformMQ/Integration/devices/ipmq001.back.int.cwwtf.local']

    # get rid of [' and of ']
    content = content[2:][:-1]
    # split on , then remove whitespace, then get rid of the start quote ' and end quote ',
    devlist = [x.strip()[1:][:-1] for x in content.split(",")]
    # split on " at " and throw away the first part
    devlist = [x.split(" at ")[1].strip() for x in devlist]
    # get rid of /zport/dmd/
    devlist = [x.replace("/zport/dmd/", "", 1) for x in devlist]
    # get rid of Devices/BBC/ApplicationHost/Platform/Delivery/
    devlist = [x.replace(zenossselector + "/", "", 1) for x in devlist]
    # so now we have "InterPlatformMQ/Integration/devices/ipmq001.back.int.cwwtf.local"
    # split on "/devices/"
    devlist = [x.split("/devices/") for x in devlist]
    devlist = [(p.replace("/", "_"), n) for (p, n) in devlist]
    # so now we have ("InterPlatformMQ_Integration", "ipmq001.back.int.cwwtf.local")

    def get_env(n):
        # ipmq001.back.int.cwwtf.local
        env = n[n.find(".") + 1:]
        # back.int.cwwtf.local
        env = env.replace("back.", "")
        # int.cwwtf.local
        env = env.replace(".local", "")
        # int.cwwtf
        env = env.split(".")[0]
        # int
        return env

    pools = {}
    environments = {}
    c = 0
    for p, n in devlist:
        e = get_env(n)
        # an example
        # n -> db118.back.stage.telhc.local
        # p -> Database_Postgres
        # e -> stage
        if e not in environments:
            environment, created = Environment.objects.ensure(e)
            if created:
                logger.info("Created environment %s", unicode(environment))
            environments[e] = environment
        else:
            environment = environments[e]

        pool_success = True  # lets be positive :)
        if p not in pools:
            logger.info("Ensuring pool %s", unicode(p))
            try:
                pool, created = self.ensure_pool(environment, p)
                pools[p] = pool
            except:
                pool_success = False
        else:
            pool = pools[p]

        c += 1
        node, created = Node.objects.ensure(environment, n)
        if pool_success:
            pool_membership, created = PoolMembership.objects.ensure(pool, node)

    logger.info("Saw %d nodes", c)
    crichtonCronJobStatus.objects.update_success('index_zenoss')
def __init__(self, redirect_to, *args, **kwargs):
    parsed = urlparse(redirect_to)
    if parsed.scheme and parsed.scheme not in self.allowed_schemes:
        raise SuspiciousOperation(
            "Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
    super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
    self['Location'] = iri_to_uri(redirect_to)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
            current_app=None):
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}

    if prefix is None:
        prefix = get_script_prefix()

    if not isinstance(viewname, six.string_types):
        view = viewname
    else:
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]

        resolved_path = []
        ns_pattern = ''
        while path:
            ns = path.pop()

            # Lookup the name to see if it could be an app identifier
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver
                if current_app and current_app in app_list:
                    # If we are reversing for a particular app,
                    # use that namespace
                    ns = current_app
                elif ns not in app_list:
                    # The name isn't shared by one of the instances
                    # (i.e., the default) so just pick the first instance
                    # as the default.
                    ns = app_list[0]
            except KeyError:
                pass

            try:
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                ns_pattern = ns_pattern + extra
            except KeyError as key:
                if resolved_path:
                    raise NoReverseMatch(
                        "%s is not a registered namespace inside '%s'" %
                        (key, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" % key)
        if ns_pattern:
            resolver = get_ns_resolver(ns_pattern, resolver)

    return iri_to_uri(
        resolver._reverse_with_prefix(view, prefix, *args, **kwargs))
def _cache_key(self, path_obj):
    """Return cache key for download data"""
    return iri_to_uri("%s:export_action" % self._query_url(path_obj.pootle_path))
def iriencode(value):
    """Escape an IRI value for use in a URL."""
    return iri_to_uri(value)
def get_full_path(self):
    # RFC 3986 requires query string arguments to be in the ASCII range.
    # Rather than crash if this doesn't happen, we encode defensively.
    return '%s%s' % (
        self.path,
        ('?' + iri_to_uri(self.META.get('QUERY_STRING', '')))
        if self.META.get('QUERY_STRING', '') else ''
    )
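# An illustrative sketch (hypothetical path and query string) of the defensive
# encoding in get_full_path() above: QUERY_STRING may contain characters
# outside the ASCII range, and iri_to_uri() brings the result back within the
# RFC 3986 rules before it is appended to the path.
from django.utils.encoding import iri_to_uri

path = '/search/'
query_string = u'q=Jürgen Münster'
full_path = '%s%s' % (path, ('?' + iri_to_uri(query_string)) if query_string else '')
# full_path == '/search/?q=J%C3%BCrgen%20M%C3%BCnster'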
def _generate_cache_header_key(key_prefix, request):
    """Returns a cache key for the header cache."""
    path = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, path.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)