def get_context_data(self, **kwargs):
    """Build the template context with social-media metadata for this view's purpose.

    Adds absolute ACTION_URL/REGISTER_URL (rewritten onto CANONICAL_HOST when
    configured), OpenGraph/Twitter strings from the ``_messages`` table, and a
    ``show_gpg`` flag when the bound form has GPG-related input or errors.
    """
    context = super(XMPPAccountView, self).get_context_data(**kwargs)
    context['menuitem'] = self.purpose

    # Social media
    action_url = reverse('xmpp_accounts:%s' % self.purpose)
    context['ACTION_URL'] = self.request.build_absolute_uri(action_url)
    context['REGISTER_URL'] = self.request.build_absolute_uri(
        reverse('xmpp_accounts:register'))
    # NOTE(review): assumes request.site is a dict-like with string formatting
    # keys used by the message templates — confirm against middleware.
    context['OPENGRAPH_TITLE'] = _messages[self.purpose]['opengraph_title'] % self.request.site
    context['OPENGRAPH_DESCRIPTION'] = _messages[self.purpose]['opengraph_description'] \
        % self.request.site
    context['TWITTER_TEXT'] = _messages[self.purpose].get('twitter_text',
                                                          context['OPENGRAPH_TITLE'])

    if 'CANONICAL_HOST' in self.request.site:
        # Rewrite only the host part of both URLs to the canonical host.
        context['ACTION_URL'] = urlsplit(context['ACTION_URL'])._replace(
            netloc=self.request.site['CANONICAL_HOST']).geturl()
        context['REGISTER_URL'] = urlsplit(context['REGISTER_URL'])._replace(
            netloc=self.request.site['CANONICAL_HOST']).geturl()

    # TODO: Yes, that's ugly!
    form = context['form']
    if settings.GPG and hasattr(form, 'cleaned_data') and 'gpg_key' in form.fields:
        # Show the GPG section when GPG fields have errors or submitted values.
        if form['gpg_key'].errors or form['fingerprint'].errors or \
                form.cleaned_data.get('fingerprint') or form.cleaned_data.get('gpg_key'):
            context['show_gpg'] = True

    return context
def assertUpload(self, filename, content):
    """Request an upload slot, PUT *content*, and verify the stored Upload.

    Exercises the full slot -> PUT -> download round trip and always deletes
    the uploaded file afterwards, even if an assertion fails.
    """
    # First request a slot
    self.assertEquals(Upload.objects.count(), 0)
    response = slot(jid=user_jid, name=filename, size=len(content))
    self.assertEquals(response.status_code, 200)
    self.assertEquals(Upload.objects.count(), 1)
    # The slot response body is "<put_url> <get_url>" separated by whitespace.
    put_url, get_url = response.content.decode('utf-8').split()

    # Upload the file
    put_path = urlsplit(put_url).path
    response = put(put_path, content)
    self.assertEquals(response.status_code, 201)

    # Get the object, verify that the same URLs are generated
    upload = Upload.objects.all()[0]  # we verified there is exactly one above
    try:
        self.assertEqual((put_url, get_url),
                         upload.get_urls(response.wsgi_request))

        # open the file, verify contents
        self.assertEqual(six.b(content), upload.file.read())

        # try to download it
        self.assertEqual(upload.file.url, urlsplit(get_url).path)
    finally:
        # remove file
        upload.file.delete(save=True)
def test_user_directed_to_login_page_when_csrf_error():
    """A login POST with an invalid CSRF token should bounce back to the login page.

    Verifies the redirect chain ends at the login page after passing through
    the dashboard, and that the final page loads with a 200.
    """
    business_email = '*****@*****.**'
    password = '******'
    User.objects.create_user(business_email, password)
    # enforce_csrf_checks makes the test client reject the bogus token below.
    client = Client(enforce_csrf_checks=True)
    data = {'username': business_email,
            'password': password,
            'csrfmiddlewaretoken': 'notavalidtoken'}
    response = client.post(reverse('login'), data=data, follow=True)
    assert hasattr(response, 'redirect_chain')
    assert len(response.redirect_chain) > 0, "Response didn't redirect"
    assert response.redirect_chain[0][1] == 302
    # Last hop: back at the login page.
    url, _ = response.redirect_chain[-1]
    scheme, netloc, path, query, fragment = urlsplit(url)
    assert path == reverse('login')
    # Hop before that went through the dashboard.
    url, _ = response.redirect_chain[-2]
    scheme, netloc, path, query, fragment = urlsplit(url)
    assert path == reverse('dashboard')
    assert response.status_code == 200
def __call__(self, value):
    """Validate that *value* is a URL with an allowed scheme.

    Falls back to punycode-encoding the host (IDN -> ACE) when plain
    validation fails, and verifies bracketed IPv6 hosts on success.

    Raises:
        ValidationError: If the scheme is not in ``self.schemes`` or the
            URL is otherwise invalid.
    """
    value = force_text(value)
    # Check first if the scheme is valid
    scheme = value.split('://')[0].lower()
    if scheme not in self.schemes:
        raise ValidationError(self.message, code=self.code)

    # Then check full URL
    try:
        super(URLValidator, self).__call__(value)
    except ValidationError as e:
        # Trivial case failed. Try for possible IDN domain
        if value:
            try:
                scheme, netloc, path, query, fragment = urlsplit(value)
            except ValueError:
                # e.g. "Invalid IPv6 URL" — report as a validation failure
                # instead of letting the ValueError escape to the caller.
                raise ValidationError(self.message, code=self.code)
            try:
                netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
            except UnicodeError:  # invalid domain part
                raise e
            url = urlunsplit((scheme, netloc, path, query, fragment))
            super(URLValidator, self).__call__(url)
        else:
            raise
    else:
        # Now verify IPv6 in the netloc part
        host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
        if host_match:
            potential_ip = host_match.groups()[0]
            try:
                validate_ipv6_address(potential_ip)
            except ValidationError:
                raise ValidationError(self.message, code=self.code)
        url = value
def assertRedirects(self, response, expected_url, status_code=302,
                    target_status_code=200, host=None, msg_prefix='',
                    fetch_redirect_response=True):
    """Asserts that a response redirected to a specific URL, and that the
    redirect URL can be loaded.

    Note that assertRedirects won't work for external links since it uses
    TestClient to do a request (use fetch_redirect_response=False to check
    such links without fetching thtem).
    """
    if msg_prefix:
        msg_prefix += ": "

    if hasattr(response, 'redirect_chain'):
        # The request was a followed redirect
        self.assertTrue(len(response.redirect_chain) > 0,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
                (response.status_code, status_code))

        self.assertEqual(response.redirect_chain[0][1], status_code,
            msg_prefix + "Initial response didn't redirect as expected:"
            " Response code was %d (expected %d)" %
                (response.redirect_chain[0][1], status_code))

        # The final URL/status in the chain is what we compare against.
        url, status_code = response.redirect_chain[-1]

        self.assertEqual(response.status_code, target_status_code,
            msg_prefix + "Response didn't redirect as expected: Final"
            " Response code was %d (expected %d)" %
                (response.status_code, target_status_code))
    else:
        # Not a followed redirect
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
                (response.status_code, status_code))

        url = response.url
        scheme, netloc, path, query, fragment = urlsplit(url)

        if fetch_redirect_response:
            redirect_response = response.client.get(path, QueryDict(query))

            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            self.assertEqual(redirect_response.status_code, target_status_code,
                msg_prefix + "Couldn't retrieve redirection page '%s':"
                " response code was %d (expected %d)" %
                    (path, redirect_response.status_code, target_status_code))

    # A scheme-less/host-less expected_url is compared as http://testserver/...
    e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
    if not (e_scheme or e_netloc):
        expected_url = urlunsplit(('http', host or 'testserver', e_path,
                                   e_query, e_fragment))

    self.assertEqual(url, expected_url,
        msg_prefix + "Response redirected to '%s', expected '%s'" %
            (url, expected_url))
def get_context_data(self, **kwargs):
    """Build the template context with social-media metadata and GPG flag.

    Mirrors the site's convention: absolute ACTION_URL/REGISTER_URL rewritten
    onto CANONICAL_HOST when configured, OpenGraph/Twitter strings, and
    ``show_gpg`` when the form carries GPG data or errors.
    """
    context = super(AntiSpamFormView, self).get_context_data(**kwargs)
    context['menuitem'] = getattr(self, 'menuitem', None)

    # Social media
    action_url = self.action_url
    if action_url is not None:
        action_url = reverse(action_url)
    context['ACTION_URL'] = self.request.build_absolute_uri(action_url)
    context['REGISTER_URL'] = self.request.build_absolute_uri('/')
    if 'CANONICAL_HOST' in self.request.site:
        # Rewrite only the host part of both URLs onto the canonical host.
        context['ACTION_URL'] = urlsplit(context['ACTION_URL'])._replace(
            netloc=self.request.site['CANONICAL_HOST']).geturl()
        context['REGISTER_URL'] = urlsplit(context['REGISTER_URL'])._replace(
            netloc=self.request.site['CANONICAL_HOST']).geturl()
    context['OPENGRAPH_TITLE'] = self.opengraph_title % self.request.site
    context['OPENGRAPH_DESCRIPTION'] = self.opengraph_description % self.request.site
    context['TWITTER_TEXT'] = getattr(self, 'twitter_text', context['OPENGRAPH_TITLE'])

    form = context['form']
    if settings.GPG and hasattr(form, 'cleaned_data') and isinstance(form, EmailMixin):
        # Show the GPG section when GPG fields have errors or submitted values.
        if form['gpg_key'].errors or form['fingerprint'].errors or \
                form.cleaned_data.get('fingerprint') or form.cleaned_data.get('gpg_key'):
            context['show_gpg'] = True
    return context
def url(self, name, force=False):
    """
    Return the real URL in DEBUG mode.

    Outside DEBUG (or with force=True) the name is resolved to its
    content-hashed stored name before being handed to the parent storage.
    """
    if settings.DEBUG and not force:
        hashed_name, fragment = name, ''
    else:
        clean_name, fragment = urldefrag(name)
        if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
            hashed_name = name
        else:
            hashed_name = self.stored_name(clean_name)

    final_url = super(HashedFilesMixin, self).url(hashed_name)

    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    query_fragment = '?#' in name  # [sic!]
    if fragment or query_fragment:
        urlparts = list(urlsplit(final_url))
        # urlparts indices: 2 = path, 3 = query, 4 = fragment.
        if fragment and not urlparts[4]:
            urlparts[4] = fragment
        if query_fragment and not urlparts[3]:
            urlparts[2] += '?'
        final_url = urlunsplit(urlparts)

    return unquote(final_url)
def url(self, name, force=False):
    """
    Returns the real URL in DEBUG mode.

    Outside DEBUG (or with force=True) the hashed name is looked up in the
    cache first and recomputed (and re-cached) on a miss.
    """
    if settings.DEBUG and not force:
        hashed_name, fragment = name, ""
    else:
        clean_name, fragment = urldefrag(name)
        if urlsplit(clean_name).path.endswith("/"):  # don't hash paths
            hashed_name = name
        else:
            cache_key = self.cache_key(name)
            hashed_name = self.cache.get(cache_key)
            if hashed_name is None:
                hashed_name = self.hashed_name(clean_name).replace("\\", "/")
                # set the cache if there was a miss
                # (e.g. if cache server goes down)
                self.cache.set(cache_key, hashed_name)

    final_url = super(CachedFilesMixin, self).url(hashed_name)

    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    query_fragment = "?#" in name  # [sic!]
    if fragment or query_fragment:
        urlparts = list(urlsplit(final_url))
        # urlparts indices: 2 = path, 3 = query, 4 = fragment.
        if fragment and not urlparts[4]:
            urlparts[4] = fragment
        if query_fragment and not urlparts[3]:
            urlparts[2] += "?"
        final_url = urlunsplit(urlparts)

    return unquote(final_url)
def __call__(self, request):
    """Permanently redirect requests on legacy hosts to the canonical site.

    Keeps the path, query string and fragment of the incoming request but
    swaps in the scheme and host from ``settings.SITE_URL``.
    """
    if request.get_host() not in settings.LEGACY_HOSTS:
        return self.get_response(request)

    canonical = urlsplit(settings.SITE_URL)
    requested = urlsplit(request.get_full_path())
    # scheme + netloc from SITE_URL, everything else from the legacy URL.
    destination = urlunsplit(canonical[:2] + requested[2:])
    return HttpResponsePermanentRedirect(destination)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    def unquote_quote(segment):
        # Normalize by unquoting first so already-quoted input is not
        # double-quoted on the way back out.
        segment = unquote(force_str(segment))
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
        return force_text(segment)

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)

    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)

    path = unquote_quote(path)
    fragment = unquote_quote(fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
def _add_query_params(self, url, new_query_params):
    """Add query parameters onto the given URL.

    Args:
        url (unicode):
            The URL to add query parameters to.

        new_query_params (dict):
            The query parameters to add.

    Returns:
        unicode:
        The resulting URL.
    """
    scheme, netloc, path, query_string, fragment = urlsplit(url)

    merged_params = parse_qs(query_string)
    merged_params.update(new_query_params)

    # Sort by key so the resulting URL is deterministic across runs.
    sorted_items = sorted(six.iteritems(merged_params), key=lambda i: i[0])
    encoded_query = urlencode(sorted_items, doseq=True)

    return urlunsplit((scheme, netloc, path, encoded_query, fragment))
def __call__(self, request):
    """Redirect if ?lang query parameter is valid."""
    query_lang = request.GET.get('lang')
    if not (query_lang and query_lang in get_kuma_languages()):
        # Invalid or no language requested, so don't redirect.
        return self.get_response(request)

    # Check if the requested language is already embedded in URL
    language = get_language_from_request(request)
    script_prefix = get_script_prefix()
    lang_prefix = '%s%s/' % (script_prefix, language)
    full_path = request.get_full_path()  # Includes querystring
    old_path = urlsplit(full_path).path
    new_prefix = '%s%s/' % (script_prefix, query_lang)
    if full_path.startswith(lang_prefix):
        # Replace the current language prefix with the requested one.
        new_path = old_path.replace(lang_prefix, new_prefix, 1)
    else:
        # No language prefix present; insert one after the script prefix.
        new_path = old_path.replace(script_prefix, new_prefix, 1)

    # Redirect to same path with requested language and without ?lang
    new_query = dict((smart_str(k), v) for k, v in request.GET.items()
                     if k != 'lang')
    if new_query:
        new_path = urlparams(new_path, **new_query)
    response = HttpResponseRedirect(new_path)
    add_shared_cache_control(response)
    return response
def hashed_name(self, name, content=None):
    """Return *name* with a content hash inserted before the extension.

    Opens the stored file when *content* is not supplied; preserves any
    query/fragment of the original name in the returned URL-style name.
    """
    parsed_name = urlsplit(unquote(name))
    clean_name = parsed_name.path.strip()
    opened = False
    if content is None:
        if not self.exists(clean_name):
            raise ValueError("The file '%s' could not be found with %r." % (clean_name, self))
        try:
            content = self.open(clean_name)
        except IOError:
            # Handle directory paths and fragments
            return name
        opened = True
    try:
        file_hash = self.file_hash(clean_name, content)
    finally:
        # Only close files we opened ourselves.
        if opened:
            content.close()
    path, filename = os.path.split(clean_name)
    root, ext = os.path.splitext(filename)
    if file_hash is not None:
        file_hash = ".%s" % file_hash
    hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
    # Rebuild the split-URL tuple with the hashed path (index 2 = path).
    unparsed_name = list(parsed_name)
    unparsed_name[2] = hashed_name
    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    if '?#' in name and not unparsed_name[3]:
        unparsed_name[2] += '?'
    return urlunsplit(unparsed_name)
def _handle_redirects(self, response, **extra):
    "Follows any redirects by requesting responses from the server using GET."
    response.redirect_chain = []
    while response.status_code in (301, 302, 303, 307):
        response_url = response.url
        redirect_chain = response.redirect_chain
        redirect_chain.append((response_url, response.status_code))

        url = urlsplit(response_url)
        # Carry scheme/host/port of the redirect target into the next request.
        if url.scheme:
            extra['wsgi.url_scheme'] = url.scheme
        if url.hostname:
            extra['SERVER_NAME'] = url.hostname
        if url.port:
            extra['SERVER_PORT'] = str(url.port)

        response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
        response.redirect_chain = redirect_chain

        if redirect_chain[-1] in redirect_chain[:-1]:
            # Check that we're not redirecting to somewhere we've already
            # been to, to prevent loops.
            raise RedirectCycleError("Redirect loop detected.", last_response=response)
        if len(redirect_chain) > 20:
            # Such a lengthy chain likely also means a loop, but one with
            # a growing path, changing view, or changing query argument;
            # 20 is the value of "network.http.redirection-limit" from Firefox.
            raise RedirectCycleError("Too many redirects.", last_response=response)

    return response
def build_absolute_uri(request, location, protocol=None):
    """request.build_absolute_uri() helper

    Like request.build_absolute_uri, but gracefully handling
    the case where request is None.
    """
    from .account import app_settings as account_settings

    if request is None:
        # No request: synthesize an absolute URI from the current site and
        # the configured default protocol, unless location is already absolute.
        site = get_current_site()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            uri = "{proto}://{domain}{url}".format(
                proto=account_settings.DEFAULT_HTTP_PROTOCOL,
                domain=site.domain,
                url=location
            )
        else:
            uri = location
    else:
        uri = request.build_absolute_uri(location)
    # NOTE: We only force a protocol if we are instructed to do so
    # (via the `protocol` parameter, or, if the default is set to
    # HTTPS. The latter keeps compatibility with the debatable use
    # case of running your site under both HTTP and HTTPS, where one
    # would want to make sure HTTPS links end up in password reset
    # mails even while they were initiated on an HTTP password reset
    # form.
    if not protocol and account_settings.DEFAULT_HTTP_PROTOCOL == "https":
        protocol = account_settings.DEFAULT_HTTP_PROTOCOL
    # (end NOTE)
    if protocol:
        # Replace whatever scheme the URI currently has.
        uri = protocol + ":" + uri.partition(":")[2]
    return uri
def is_valid_repository(self):
    """Checks if this is a valid Git repository.

    Runs ``git ls-remote <path> HEAD``, temporarily embedding stored
    credentials into HTTP(S) URLs, and returns False (logging the error)
    when the command fails.
    """
    url_parts = urlsplit(self.path)

    if (url_parts.scheme.lower() in ('http', 'https') and
        url_parts.username is None and
        self.username):
        # Git URLs, especially HTTP(s), that require authentication should
        # be entered without the authentication info in the URL (because
        # then it would be visible), but we need it in the URL when testing
        # to make sure it exists. Reformat the path here to include them.
        new_netloc = urlquote(self.username, safe='')

        if self.password:
            new_netloc += ':' + urlquote(self.password, safe='')

        new_netloc += '@' + url_parts.netloc

        path = urlunsplit((url_parts[0], new_netloc, url_parts[2],
                           url_parts[3], url_parts[4]))
    else:
        path = self.path

    p = self._run_git(['ls-remote', path, 'HEAD'])
    errmsg = p.stderr.read()
    failure = p.wait()

    if failure:
        logging.error("Git: Failed to find valid repository %s: %s" %
                      (self.path, errmsg))
        return False

    return True
def hashed_name(self, name, content=None, filename=None):
    """Return *name* with a content hash inserted before the extension.

    When *content* is not supplied, the file is located via staticfiles
    finders and opened from disk. Preserves any query/fragment of the
    original name. Note: the *filename* parameter is rebound below.
    """
    parsed_name = urlsplit(unquote(name))
    clean_name = parsed_name.path.strip()
    opened = False
    if content is None:
        absolute_path = finders.find(clean_name)
        try:
            content = open(absolute_path, 'rb')
        except (IOError, OSError) as e:
            if e.errno == errno.ENOENT:
                raise ValueError("The file '%s' could not be found with %r." % (clean_name, self))
            else:
                raise
        content = File(content)
        opened = True
    try:
        file_hash = self.file_hash(clean_name, content)
    finally:
        # Only close files we opened ourselves.
        if opened:
            content.close()
    path, filename = os.path.split(clean_name)
    root, ext = os.path.splitext(filename)
    if file_hash is not None:
        file_hash = ".%s" % file_hash
    hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
    # Rebuild the split-URL tuple with the hashed path (index 2 = path).
    unparsed_name = list(parsed_name)
    unparsed_name[2] = hashed_name
    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    if '?#' in name and not unparsed_name[3]:
        unparsed_name[2] += '?'
    return urlunsplit(unparsed_name)
def get_urls(self, request):
    """Return the (put_url, get_url) pair for this upload.

    put_url is based on the upload's absolute URL; get_url points at the
    webserver-served media location when webserver download is enabled,
    otherwise it equals put_url. Both are forced to https when configured.
    """
    location = self.get_absolute_url()

    if _upload_url is None:
        put_url = request.build_absolute_uri(location)
    else:
        put_url = '%s%s' % (_upload_url, location)

    if _ws_download is True:
        get_url = '%s%s/%s/%s' % (settings.MEDIA_URL, _upload_base.strip('/'),
                                  self.hash, quote(self.name.encode('utf-8')))
        # MEDIA_URL may be relative; make the URL absolute in that case.
        if not urlsplit(get_url).netloc:
            if _upload_url is None:
                get_url = request.build_absolute_uri(get_url)
            else:
                get_url = '%s%s' % (_upload_url, get_url)
    else:
        get_url = put_url

    if _force_https is True:
        put_url = put_url.replace('http://', 'https://')
        get_url = get_url.replace('http://', 'https://')

    return put_url, get_url
def _do_on_path(self, cb, path, revision=HEAD):
    """Invoke *cb* with a normalized SVN path and revision.

    Translates pysvn ClientError failures into FileNotFoundError or
    SCMError as appropriate.
    """
    if not path:
        raise FileNotFoundError(path, revision)

    try:
        normpath = self.normalize_path(path)

        # SVN expects to have URLs escaped. Take care to only
        # escape the path part of the URL.
        if self.client.is_url(normpath):
            pathtuple = urlsplit(normpath)
            path = pathtuple[2]
            if isinstance(path, six.text_type):
                path = path.encode("utf-8", "ignore")
            normpath = urlunsplit((pathtuple[0], pathtuple[1],
                                   quote(path), "", ""))

        normrev = self._normalize_revision(revision)
        return cb(normpath, normrev)
    except ClientError as e:
        exc = bytes(e).decode("utf-8")
        if "File not found" in exc or "path not found" in exc:
            raise FileNotFoundError(path, revision, detail=exc)
        elif "callback_ssl_server_trust_prompt required" in exc:
            raise SCMError(
                _(
                    "HTTPS certificate not accepted. Please ensure that "
                    "the proper certificate exists in %s "
                    "for the user that reviewboard is running as."
                )
                % os.path.join(self.config_dir, "auth")
            )
        else:
            raise SVNTool.normalize_error(e)
def __call__(self, value):
    """Validate an OAuth redirect URI: no fragment, scheme in the allow-list.

    Raises:
        ValidationError: If the URI contains a fragment or uses a scheme
            outside ``self.allowed_schemes``.
    """
    super(RedirectURIValidator, self).__call__(value)
    value = force_text(value)
    # '#' anywhere in the raw string means a fragment is present. This is
    # equivalent to the old ``len(value.split('#')) > 1`` without building
    # a throwaway list.
    if '#' in value:
        raise ValidationError('Redirect URIs must not contain fragments')
    scheme = urlsplit(value).scheme
    if scheme.lower() not in self.allowed_schemes:
        raise ValidationError('Redirect URI scheme is not allowed.')
def absolutify(url):
    """Joins settings.SITE_URL with a URL path.

    Absolute URLs (anything starting with 'http') are returned unchanged;
    relative paths get SITE_URL's scheme and host, with '' normalized
    to '/'.
    """
    if url.startswith('http'):
        return url

    site = urlsplit(settings.SITE_URL)
    parts = urlsplit(url)
    path = parts.path or '/'

    # urlunparse takes a 6-tuple whose 4th element is the (obsolete)
    # "params" component; pass an empty string rather than None — the old
    # None only worked because urlunparse happens to treat it as falsy.
    return urlunparse([site.scheme, site.netloc, path, '',
                       parts.query, parts.fragment])
def test_admin_does_allow_unused_urls(admin_client):
    """A page with a URL not referenced elsewhere can still be created in the admin."""
    data = {
        'url': 'sitemap2.xml',
        'template': 'varlet/pages/layouts/test_template.html'
    }
    url = reverse('admin:varlet_page_add')
    response = admin_client.post(url, data=data, follow=False)
    # Successful creation responds with a redirect to the changelist.
    assert response.status_code == 302
    assert urlsplit(response.url).path == reverse('admin:varlet_page_changelist')
def render(self, context):
    """Render the wrapped URL with an affiliate ``tag`` query parameter appended.

    The tag value comes from ``refcache.get_ref()`` and is appended to any
    existing query string (or becomes the whole query string).
    """
    url = self.url.render(context)
    parts = urlsplit(url)
    if parts.query:
        qs = parts.query + '&tag=' + refcache.get_ref()
    else:
        qs = 'tag=' + refcache.get_ref()
    return urlunsplit((parts.scheme, parts.netloc, parts.path, qs,
                       parts.fragment))
def build_front_uri(location, protocol=None):
    """Build an absolute URI for the configured front-end domain.

    If *location* is already absolute (has both scheme and netloc) it is
    returned unchanged; otherwise it is prefixed with the default HTTP
    protocol and ``settings.FRONT_DOMAIN``.

    Note: *protocol* is currently unused and kept for interface
    compatibility with callers.
    """
    domain = getattr(settings, "FRONT_DOMAIN", None)
    bits = urlsplit(location)
    if not (bits.scheme and bits.netloc):
        # Fix: the original referenced an undefined ``site.domain`` here
        # instead of the ``domain`` computed above.
        uri = '{proto}://{domain}{url}'.format(
            proto=account_settings.DEFAULT_HTTP_PROTOCOL,
            domain=domain,
            url=location)
    else:
        uri = location
    # Fix: the original never returned the computed URI.
    return uri
def replace_query_param(url, key, val):
    """
    Given a URL and a key/val pair, set or replace an item in the query
    parameters of the URL, and return the new URL.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    params = QueryDict(query).copy()
    params[key] = val
    return urlparse.urlunsplit(
        (scheme, netloc, path, params.urlencode(), fragment))
def test_construct_with_per_page(self):
    """Testing APIPaginator construction with per_page=<value>"""
    url = 'http://example.com/api/list/?foo=1'
    paginator = DummyAPIPaginator(None, url, per_page=10)
    # parts[3] is the query-string component of the split URL.
    parts = urlsplit(paginator.url)
    query_params = parse_qs(parts[3])

    # Existing params are preserved and per-page is added.
    self.assertEqual(query_params['foo'], ['1'])
    self.assertEqual(query_params['per-page'], ['10'])
def remove_query_param(url, key):
    """
    Given a URL and a key/val pair, remove an item in the query
    parameters of the URL, and return the new URL.
    """
    parts = urlparse.urlsplit(url)
    params = urlparse.parse_qs(parts.query)
    params.pop(key, None)
    # Sort keys so the rebuilt query string is deterministic.
    new_query = urlparse.urlencode(sorted(params.items()), doseq=True)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))
def replace(self, match, repl):
    """Rewrite a matched URL using *repl* as a str.format template.

    Returns the match's first captured subgroup unchanged when the URL's
    split parts fail ``check_parts``; otherwise formats *repl* with
    keyword arguments derived from the parts and the match.
    """
    groups = match.groups()
    parts = urlsplit(match.group('url'))
    if not self.check_parts(parts):
        # NOTE(review): groups[0] is the first *subgroup*, not the whole
        # match — presumably the pattern wraps everything in one group;
        # confirm against the regex definition.
        return groups[0]
    kwargs = self.get_replace_args(parts, match)
    return repl.format(**kwargs)
def replace_query_param(url, key, val):
    """
    Given a URL and a key/val pair, set or replace an item in the query
    parameters of the URL, and return the new URL.
    """
    parts = urlparse.urlsplit(url)
    # keep_blank_values preserves params like "?b=" through the round trip.
    params = urlparse.parse_qs(parts.query, keep_blank_values=True)
    params[key] = [val]
    # Sort keys so the rebuilt query string is deterministic.
    new_query = urlparse.urlencode(sorted(params.items()), doseq=True)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))
def __call__(self, value): value = force_text(value) # Check first if the scheme is valid scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code) # Then check full URL try: super(URLValidator, self).__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: try: scheme, netloc, path, query, fragment = urlsplit(value) except ValueError: # for example, "Invalid IPv6 URL" raise ValidationError(self.message, code=self.code) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super(URLValidator, self).__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match.groups()[0] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code) url = value # The maximum length of a full host name is 253 characters per RFC 1034 # section 3.1. It's defined to be 255 bytes or less, but this includes # one byte for the length of the name and one byte for the trailing dot # that's used to indicate absolute names in DNS. if len(urlsplit(value).netloc) > 253: raise ValidationError(self.message, code=self.code)
def _get_referer(request): """Return the HTTP_REFERER, if existing.""" if 'HTTP_REFERER' in request.META: sr = urlsplit(request.META['HTTP_REFERER']) return urlunsplit(('', '', sr.path, sr.query, sr.fragment))
def assertRedirectToLogin(self, response):
    """Assert that *response* is a 302 redirect whose target path is the login page."""
    self.assertEqual(response.status_code, 302)
    url = response['Location']
    # Only the path is compared; scheme/host may differ between clients.
    e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(url)
    self.assertEqual(e_path, reverse('account_login'))
def get_scheme(self, request):
    """Return the scheme ('http' or 'https') of the current request's URI."""
    absolute_uri = request.build_absolute_uri(None)
    return urlsplit(absolute_uri).scheme
def assertRedirects(self, response, expected_url, status_code=302,
                    target_status_code=200, host=None, msg_prefix='',
                    fetch_redirect_response=True):
    """Asserts that a response redirected to a specific URL, and that the
    redirect URL can be loaded.

    Note that assertRedirects won't work for external links since it uses
    TestClient to do a request (use fetch_redirect_response=False to check
    such links without fetching thtem).
    """
    if msg_prefix:
        msg_prefix += ": "

    e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
    if hasattr(response, 'redirect_chain'):
        # The request was a followed redirect
        self.assertTrue(len(response.redirect_chain) > 0,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
                (response.status_code, status_code))

        self.assertEqual(response.redirect_chain[0][1], status_code,
            msg_prefix + "Initial response didn't redirect as expected:"
            " Response code was %d (expected %d)" %
                (response.redirect_chain[0][1], status_code))

        # The final URL/status in the chain is what we compare against.
        url, status_code = response.redirect_chain[-1]
        scheme, netloc, path, query, fragment = urlsplit(url)

        self.assertEqual(response.status_code, target_status_code,
            msg_prefix + "Response didn't redirect as expected: Final"
            " Response code was %d (expected %d)" %
                (response.status_code, target_status_code))
    else:
        # Not a followed redirect
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
                (response.status_code, status_code))

        url = response.url
        scheme, netloc, path, query, fragment = urlsplit(url)

        if fetch_redirect_response:
            # Follow the redirect with the same scheme it specified.
            redirect_response = response.client.get(path, QueryDict(query),
                                                    secure=(scheme == 'https'))

            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            self.assertEqual(redirect_response.status_code, target_status_code,
                msg_prefix + "Couldn't retrieve redirection page '%s':"
                " response code was %d (expected %d)" %
                    (path, redirect_response.status_code, target_status_code))

    # Fill in missing scheme/host of expected_url from the actual redirect.
    e_scheme = e_scheme if e_scheme else scheme or 'http'
    e_netloc = e_netloc if e_netloc else host or 'testserver'
    expected_url = urlunsplit((e_scheme, e_netloc, e_path, e_query,
                               e_fragment))

    self.assertEqual(url, expected_url,
        msg_prefix + "Response redirected to '%s', expected '%s'" %
            (url, expected_url))
def _get_safe_internal_url(urlstring): """Return the URL without the scheme part and the domain part, if present.""" sr = urlsplit(urlstring) return urlunsplit(('', '', sr.path, sr.query, sr.fragment))
def __call__(self, value):
    """Validate that *value* is a URL on the amazon.com domain.

    Raises:
        ValidationError: If base URL validation fails or the host is not
            amazon.com.
    """
    super(AmazonURLValidator, self).__call__(value)
    netloc = urlsplit(value)[1]
    labels = netloc.split('.')
    # The old ``domain, tld = netloc.split('.')[-2:]`` raised ValueError
    # (not ValidationError) for single-label hosts such as "localhost";
    # comparing the slice handles that case uniformly.
    if labels[-2:] != ['amazon', 'com']:
        raise ValidationError(self.message, code=self.code)
def check_active(url, element, **kwargs):
    '''check "active" url, apply css_class

    Compares the element's href against kwargs['full_path'] (prefix match in
    menu mode, equality otherwise) and, on a match, appends
    kwargs['css_class'] to the element's class attribute. Returns True when
    the class was applied, False otherwise.
    '''
    menu = yesno_to_bool(kwargs['menu'], 'menu')
    ignore_params = yesno_to_bool(kwargs['ignore_params'], 'ignore_params')

    # check missing href parameter
    if not url.attrib.get('href', None) is None:
        # get href attribute
        href = url.attrib['href'].strip()

        # href="#" is often used when links shouldn't be handled by browsers.
        # For example, Bootstrap uses this for expandable menus on
        # small screens, see
        # https://getbootstrap.com/docs/4.0/components/navs/#using-dropdowns
        if href == '#':
            return False

        # split into urlparse object
        href = urlparse.urlsplit(href)

        # cut off hashtag (anchor)
        href = href._replace(fragment='')

        # cut off get params (?key=var&etc=var2)
        if ignore_params:
            href = href._replace(query='')
            kwargs['full_path'] = urlparse.urlunsplit(
                urlparse.urlsplit(
                    kwargs['full_path']
                )._replace(query='')
            )

        # build urlparse object back into string
        href = urlparse.urlunsplit(href)

        # check empty href
        if href == '':
            # replace href with current location
            href = kwargs['full_path']

        # compare full_path with href according to menu configuration
        if menu:
            # try mark "root" (/) url as "active", in equals way
            if href == '/' == kwargs['full_path']:
                logic = True
            # skip "root" (/) url, otherwise it will be always "active"
            elif href != '/':
                # start with logic
                logic = (
                    kwargs['full_path'].startswith(href) or
                    # maybe an urlquoted href was supplied
                    urlquote(kwargs['full_path']).startswith(href) or
                    kwargs['full_path'].startswith(urlquote(href))
                )
            else:
                logic = False
        else:
            # equals logic
            logic = (
                kwargs['full_path'] == href or
                # maybe an urlquoted href was supplied
                urlquote(kwargs['full_path']) == href or
                kwargs['full_path'] == urlquote(href)
            )

        # "active" url found
        if logic:
            # check parent tag has "class" attribute or it is empty
            if element.attrib.get('class'):
                # prevent multiple "class" attribute adding
                if kwargs['css_class'] not in element.attrib['class']:
                    # append "active" class
                    element.attrib['class'] += ' {css_class}'.format(
                        css_class=kwargs['css_class'],
                    )
            else:
                # create or set (if empty) "class" attribute
                element.attrib['class'] = kwargs['css_class']
            return True

    # no "active" urls found
    return False
def normalize_path(url):
    """Return *url*'s path with any trailing slash removed and a leading one ensured."""
    path = urlsplit(url).path.rstrip('/')
    if path.startswith('/'):
        return path
    return '/' + path
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets.

    Args:
        request (django.http.HttpRequest):
            The HTTP request from the client.

        webhook_targets (list of
                         reviewboard.notifications.models.WebHookTarget):
            The list of WebHook targets containing endpoint URLs to dispatch
            to.

        event (unicode):
            The name of the event being dispatched.

        payload (dict):
            The payload data to encode for the WebHook payload.

    Raises:
        ValueError:
            There was an error with the payload format. Details are in the
            log and the exception message.
    """
    encoder = BasicAPIEncoder()
    bodies = {}

    # Payloads are normalized lazily, at most once each, since both variants
    # may be needed across different targets.
    raw_norm_payload = None
    json_norm_payload = None

    for webhook_target in webhook_targets:
        use_custom_content = webhook_target.use_custom_content
        encoding = webhook_target.encoding

        # See how we need to handle normalizing this payload. If we need
        # something JSON-safe, then we need to go the more aggressive route
        # and normalize keys to strings.
        if raw_norm_payload is None or json_norm_payload is None:
            try:
                if (raw_norm_payload is None and
                    (use_custom_content or
                     encoding == webhook_target.ENCODING_XML)):
                    # This payload's going to be provided for XML and custom
                    # templates. We don't want to alter the keys at all.
                    raw_norm_payload = normalize_webhook_payload(
                        payload=payload,
                        request=request)
                elif (json_norm_payload is None and
                      not use_custom_content and
                      encoding in (webhook_target.ENCODING_JSON,
                                   webhook_target.ENCODING_FORM_DATA)):
                    # This payload's going to be provided for JSON or
                    # form-data. We want to normalize all keys to strings.
                    json_norm_payload = normalize_webhook_payload(
                        payload=payload,
                        request=request,
                        use_string_keys=True)
            except TypeError as e:
                logging.exception(
                    'WebHook payload passed to '
                    'dispatch_webhook_event containing invalid '
                    'data types: %s',
                    e)

                raise ValueError(six.text_type(e))

        if use_custom_content:
            try:
                assert raw_norm_payload is not None
                body = render_custom_content(webhook_target.custom_content,
                                             raw_norm_payload)
                body = force_bytes(body)
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            # Encoded bodies are cached per encoding so identical targets
            # don't pay the encoding cost twice.
            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        assert json_norm_payload is not None
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(json_norm_payload,
                                              request=request)
                    elif encoding == webhook_target.ENCODING_XML:
                        assert raw_norm_payload is not None
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(raw_norm_payload,
                                              request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        assert json_norm_payload is not None
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(json_norm_payload,
                                                      request=request),
                        })
                    else:
                        logging.error(
                            'Unexpected WebHookTarget encoding "%s" '
                            'for ID %s',
                            encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                body = force_bytes(body)
                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            b'Content-Length': len(body),
            b'User-Agent': ('ReviewBoard-WebHook/%s'
                            % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # Sign the body per the WebHook "X-Hub-Signature" convention.
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url
            url_parts = urlsplit(url)

            if url_parts.username or url_parts.password:
                # Strip credentials out of the URL and send them in an
                # Authorization header instead.
                credentials, netloc = url_parts.netloc.split('@', 1)
                url = urlunsplit((url_parts.scheme, netloc, url_parts.path,
                                  url_parts.query, url_parts.fragment))
                headers[b'Authorization'] = \
                    b'Basic %s' % b64encode(credentials.encode('utf-8'))

            urlopen(Request(url.encode('utf-8'), body, headers))
        except Exception as e:
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)

            if isinstance(e, HTTPError):
                logging.info('Error response from %s: %s %s\n%s',
                             webhook_target.url, e.code, e.reason,
                             e.read())
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets.

    Args:
        request (django.http.HttpRequest):
            The HTTP request from the client.

        webhook_targets (list of
                         reviewboard.notifications.models.WebHookTarget):
            The list of WebHook targets containing endpoint URLs to dispatch
            to.

        event (unicode):
            The name of the event being dispatched.

        payload (dict):
            The payload data to encode for the WebHook payload.

    Raises:
        ValueError:
            There was an error with the payload format. Details are in the
            log and the exception message.
    """
    try:
        payload = normalize_webhook_payload(payload, request)
    except TypeError as e:
        logging.exception('WebHook payload passed to dispatch_webhook_event '
                          'containing invalid data types: %s',
                          e)
        raise ValueError(six.text_type(e))

    encoder = BasicAPIEncoder()
    bodies = {}

    for webhook_target in webhook_targets:
        if webhook_target.use_custom_content:
            try:
                body = render_custom_content(webhook_target.custom_content,
                                             payload)
                body = body.encode('utf-8')
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            encoding = webhook_target.encoding

            # Encode each payload encoding at most once per dispatch and
            # reuse the result for any other target with the same encoding.
            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                        body = body.encode('utf-8')
                    elif encoding == webhook_target.ENCODING_XML:
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(payload,
                                                      request=request),
                        })
                        body = body.encode('utf-8')
                    else:
                        logging.error('Unexpected WebHookTarget encoding '
                                      '"%s" for ID %s',
                                      encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            b'Content-Length': len(body),
            b'User-Agent':
                ('ReviewBoard-WebHook/%s'
                 % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # Sign the payload with HMAC-SHA1 of the shared secret
            # (GitHub-style X-Hub-Signature header).
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url
            url_parts = urlsplit(url)

            if url_parts.username or url_parts.password:
                # Strip embedded credentials out of the URL and supply them
                # through an HTTP Basic auth handler instead.
                netloc = url_parts.netloc.split('@', 1)[1]

                # Bug fix: SplitResult has no "params" field (that belongs
                # to urlparse()). Rebuild the URL from the five components
                # urlunsplit() expects: (scheme, netloc, path, query,
                # fragment). The old code accessed url_parts.params, which
                # raised AttributeError and silently killed the dispatch
                # for any URL containing credentials.
                url = urlunsplit(
                    (url_parts.scheme, netloc, url_parts.path,
                     url_parts.query, url_parts.fragment))

                password_mgr = HTTPPasswordMgrWithDefaultRealm()
                password_mgr.add_password(
                    None, url, url_parts.username, url_parts.password)
                handler = HTTPBasicAuthHandler(password_mgr)
                opener = build_opener(handler)
            else:
                opener = build_opener()

            opener.open(Request(url.encode('utf-8'), body, headers))
        except Exception as e:
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets."""
    encoder = ResourceAPIEncoder()
    bodies = {}

    for webhook_target in webhook_targets:
        if webhook_target.use_custom_content:
            try:
                body = render_custom_content(webhook_target.custom_content,
                                             payload)
                body = body.encode('utf-8')
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            encoding = webhook_target.encoding

            # Encode each payload encoding at most once per dispatch and
            # reuse the result for any other target with the same encoding.
            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                        body = body.encode('utf-8')
                    elif encoding == webhook_target.ENCODING_XML:
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(payload,
                                                      request=request),
                        })
                        body = body.encode('utf-8')
                    else:
                        logging.error(
                            'Unexpected WebHookTarget encoding "%s" '
                            'for ID %s',
                            encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            b'Content-Length': len(body),
            b'User-Agent':
                ('ReviewBoard-WebHook/%s'
                 % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # Sign the payload with HMAC-SHA1 of the shared secret
            # (GitHub-style X-Hub-Signature header).
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url
            url_parts = urlsplit(url)

            if url_parts.username or url_parts.password:
                # Strip embedded credentials out of the URL and supply them
                # through an HTTP Basic auth handler instead.
                netloc = url_parts.netloc.split('@', 1)[1]

                # Bug fix: SplitResult has no "params" field (that belongs
                # to urlparse()). Rebuild the URL from the five components
                # urlunsplit() expects: (scheme, netloc, path, query,
                # fragment). The old code accessed url_parts.params, which
                # raised AttributeError and silently killed the dispatch
                # for any URL containing credentials.
                url = urlunsplit(
                    (url_parts.scheme, netloc, url_parts.path,
                     url_parts.query, url_parts.fragment))

                password_mgr = HTTPPasswordMgrWithDefaultRealm()
                password_mgr.add_password(
                    None, url, url_parts.username, url_parts.password)
                handler = HTTPBasicAuthHandler(password_mgr)
                opener = build_opener(handler)
            else:
                opener = build_opener()

            opener.open(Request(url.encode('utf-8'), body, headers))
        except Exception as e:
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)
def get_protocol(url):
    """Return the scheme (protocol) component of ``url``.

    Args:
        url (unicode):
            The URL to parse.

    Returns:
        unicode:
        The URL scheme (e.g. ``'https'``), or an empty string if the URL
        has no scheme.
    """
    scheme = urlsplit(url).scheme

    # Parenthesized single-argument print produces identical output under
    # both the Python 2 print statement and the Python 3 print function;
    # the bare "print 'scheme: ' + scheme" was a SyntaxError on Python 3.
    print('scheme: ' + scheme)

    return scheme
def is_url(s):
    """Return whether ``s`` parses as an ``http`` or ``https`` URL."""
    try:
        scheme = urlsplit(s).scheme
    except ValueError:
        # Malformed input (e.g. a bad IPv6 literal) cannot be a URL.
        return False

    if scheme == 'http' or scheme == 'https':
        return True

    return False