def test_call_app(self):
    """Exercise the Twilio call-app TwiML endpoints.

    GET returns the <Gather> key-press prompt; POST returns the spoken
    token. Locale handling: 'en-gb' has a voice, Frysian ('fy-nl') must
    fall back to plain 'en'.
    """
    url = reverse('two_factor_twilio:call_app', args=['123456'])
    response = self.client.get(url)
    self.assertEqual(response.content,
                     b'<?xml version="1.0" encoding="UTF-8" ?>'
                     b'<Response>'
                     b' <Gather timeout="15" numDigits="1" finishOnKey="">'
                     b' <Say language="en">Hi, this is testserver calling. '
                     b'Press any key to continue.</Say>'
                     b' </Gather>'
                     b' <Say language="en">You didn\'t press any keys. Good bye.</Say>'
                     b'</Response>')

    url = reverse('two_factor_twilio:call_app', args=['123456'])
    response = self.client.post(url)
    self.assertEqual(response.content,
                     b'<?xml version="1.0" encoding="UTF-8" ?>'
                     b'<Response>'
                     b' <Say language="en">Your token is 1. 2. 3. 4. 5. 6. '
                     b'Repeat: 1. 2. 3. 4. 5. 6. Good bye.</Say>'
                     b'</Response>')

    # there is a en-gb voice
    response = self.client.get('%s?%s' % (url, urlencode({'locale': 'en-gb'})))
    self.assertContains(response, '<Say language="en-gb">')

    # there is no Frysian voice
    response = self.client.get('%s?%s' % (url, urlencode({'locale': 'fy-nl'})))
    self.assertContains(response, '<Say language="en">')
def build_links(self, request=None):
    """Build JSON-API style pagination links.

    Returns a dict with "self" (the absolute URL of the current request)
    plus "prev"/"next" links that rewrite the ``page[number]`` query
    parameter. Without a *request* no links can be derived.
    """
    links = {}
    if request is not None:
        links["self"] = request.build_absolute_uri(request.path)
        page = self._current_page
        if page is not None:
            if page.has_previous():
                u = urlparse(links["self"])
                # NOTE(review): parse_qs returns list values; urlencode()
                # without doseq=True encodes other params as list reprs —
                # confirm this is intended.
                q = parse_qs(u.query)
                q["page[number]"] = str(page.previous_page_number())
                links["prev"] = ParseResult(
                    u.scheme, u.netloc, u.path, u.params,
                    urlencode(q), u.fragment,
                ).geturl()
            if page.has_next():
                u = urlparse(links["self"])
                q = parse_qs(u.query)
                q["page[number]"] = str(page.next_page_number())
                links["next"] = ParseResult(
                    u.scheme, u.netloc, u.path, u.params,
                    urlencode(q), u.fragment,
                ).geturl()
    return links
def add_pushover(request):
    """Pushover integration view: initiate, confirm, or show the form.

    POST starts the subscription flow (redirect to Pushover); a GET with
    ``pushover_user_key`` completes (or cancels) the subscription; any
    other GET renders the settings form.
    """
    if settings.PUSHOVER_API_TOKEN is None or settings.PUSHOVER_SUBSCRIPTION_URL is None:
        raise Http404("pushover integration is not available")

    if request.method == "POST":
        # Initiate the subscription
        # The nonce stored in the session guards the callback below
        # against forged success URLs.
        nonce = get_random_string()
        request.session["po_nonce"] = nonce

        failure_url = settings.SITE_ROOT + reverse("hc-channels")
        success_url = settings.SITE_ROOT + reverse("hc-add-pushover") + "?" + urlencode({
            "nonce": nonce,
            "prio": request.POST.get("po_priority", "0"),
        })
        subscription_url = settings.PUSHOVER_SUBSCRIPTION_URL + "?" + urlencode({
            "success": success_url,
            "failure": failure_url,
        })

        return redirect(subscription_url)

    # Handle successful subscriptions
    if "pushover_user_key" in request.GET:
        if "nonce" not in request.GET or "prio" not in request.GET:
            return HttpResponseBadRequest()

        # Validate nonce
        if request.GET["nonce"] != request.session.get("po_nonce"):
            return HttpResponseForbidden()

        # Validate priority
        if request.GET["prio"] not in ("-2", "-1", "0", "1", "2"):
            return HttpResponseBadRequest()

        # All looks well--
        del request.session["po_nonce"]

        if request.GET.get("pushover_unsubscribed") == "1":
            # Unsubscription: delete all Pushover channels for this user
            Channel.objects.filter(user=request.user, kind="po").delete()
            return redirect("hc-channels")
        else:
            # Subscription
            user_key = request.GET["pushover_user_key"]
            priority = int(request.GET["prio"])

            # Channel value packs user key and priority into one string.
            channel = Channel(user=request.team.user, kind="po")
            channel.value = "%s|%d" % (user_key, priority)
            channel.save()
            channel.assign_all_checks()
            return redirect("hc-channels")

    # Show Integration Settings form
    ctx = {
        "page": "channels",
        "po_retry_delay": td(seconds=settings.PUSHOVER_EMERGENCY_RETRY_DELAY),
        "po_expiration": td(seconds=settings.PUSHOVER_EMERGENCY_EXPIRATION),
    }
    return render(request, "integrations/add_pushover.html", ctx)
def _get_request(self, request_url, request_method, **params):
    """
    Return a Request object that has the GET parameters
    attached to the url or the POST data attached to the object.
    """
    if request_method == 'GET':
        if params:
            # NOTE(review): joined with '&' (not '?'), so request_url is
            # assumed to already contain a query string — confirm callers.
            request_url += '&%s' % urlencode(params)
        request = Request(request_url)
    elif request_method == 'POST':
        # doseq=1 expands sequence values into repeated keys.
        request = Request(request_url, urlencode(params, doseq=1))
    # NOTE(review): any other request_method leaves `request` unbound and
    # raises UnboundLocalError at the return below.
    return request
def test_do_temp_upload(test):
    """
    Test the temporary upload (used with the FileBrowseUploadField)
    We use the standard test directory here (no special upload dir needed).
    """
    filebrowser.sites.UPLOAD_TEMPDIR = test.tmpdir.path_relative_directory
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory,
                                    'qqfile': 'testimage.jpg',
                                    'temporary': 'true'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        file_size = os.path.getsize(f.name)
        # Simulate the AJAX uploader's POST.
        response = test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    # Check we get OK response
    test.assertTrue(response.status_code == 200)
    data = json.loads(response.content)
    test.assertEqual(data["filename"], "testimage.jpg")
    test.assertEqual(data["temp_filename"],
                     os.path.join(test.tmpdir.path_relative_directory, "testimage.jpg"))

    # Check the file now exists
    path = os.path.join(test.tmpdir.path, 'testimage.jpg')
    test.testfile = FileObject(path, site=test.site)
    test.assertTrue(test.site.storage.exists(path))

    # Check the file has the correct size
    test.assertTrue(file_size == test.site.storage.size(path))

    # Check permissions
    if DEFAULT_PERMISSIONS is not None:
        permissions_default = oct(DEFAULT_PERMISSIONS)
        permissions_file = oct(os.stat(test.testfile.path_full).st_mode & 0o777)
        test.assertTrue(permissions_default == permissions_file)
def get_querystring(self, **kwargs):
    """Return an encoded query string with default version/media params.

    Keyword arguments extend or override the defaults.
    """
    params = {'version': '1.2.3', 'ADMIN_MEDIA_PREFIX': ''}
    params.update(kwargs)
    return urlencode(params)
def _do_paginator(context, adjacent_pages, template_name):
    """Build template names and context for a paginator inclusion tag.

    Returns a (template_name_candidates, context_dict) tuple; the context
    is empty when no "page" is present (improper tag usage).
    """
    if template_name is None:
        template_name = ("inclusion_tags/paginator.html", "inc/paginator.html")
    else:
        template_name = ("inclusion_tags/paginator_%s.html" % template_name,
                         "inc/paginator_%s.html" % template_name)
    if not "page" in context:
        # improper use of paginator tag, bail out
        return template_name, {}

    # Preserve all current GET params except the page number itself,
    # sorted for a stable querystring.
    query_params = "?p="
    if "request" in context:
        get = context["request"].GET
        query_params = "?%s&p=" % urlencode(
            tuple((k, smart_str(v)) for (k, v) in sorted(six.iteritems(get)) if k != "p")
        )

    page = context["page"]
    page_no = int(page.number)
    # Window start: clamp so the window never runs past the last page.
    s = max(1, page_no - adjacent_pages - max(0, page_no + adjacent_pages - page.paginator.num_pages))
    page_numbers = list(range(s, min(page.paginator.num_pages, s + 2 * adjacent_pages) + 1))
    return (
        template_name,
        {
            "query_params": query_params,
            "page": page,
            "results_per_page": page.paginator.per_page,
            "page_numbers": page_numbers,
            "show_first": 1 not in page_numbers,
            "show_last": page.paginator.num_pages not in page_numbers,
        },
    )
def _add_query_params(self, url, new_query_params):
    """Merge *new_query_params* into the query string of *url*.

    Existing parameters are kept (overridden on key collision) and the
    resulting query string is sorted by parameter name.

    Args:
        url (unicode): The URL to add query parameters to.
        new_query_params (dict): The query parameters to add.

    Returns:
        unicode: The resulting URL.
    """
    parts = urlsplit(url)
    merged = parse_qs(parts.query)
    merged.update(new_query_params)
    sorted_pairs = sorted(merged.items(), key=lambda pair: pair[0])
    encoded = urlencode(sorted_pairs, doseq=True)
    return urlunsplit((parts.scheme, parts.netloc, parts.path, encoded,
                       parts.fragment))
def purge(self, url):
    """Purge *url* from Cloudflare's cache via the legacy api_json endpoint.

    Errors are logged and swallowed — purging is best-effort.
    """
    try:
        response = urlopen(
            "https://www.cloudflare.com/api_json.html",
            data=urlencode(
                {
                    "email": self.cloudflare_email,
                    "tkn": self.cloudflare_token,
                    "a": "zone_file_purge",
                    # Zone is derived from the URL's host.
                    "z": urlparse(url).netloc,
                    "url": url,
                }
            ).encode("utf-8"),
        )
    except HTTPError as e:
        logger.error("Couldn't purge '%s' from Cloudflare. HTTPError: %d %s",
                     url, e.code, e.reason)
        return
    except URLError as e:
        logger.error("Couldn't purge '%s' from Cloudflare. URLError: %s",
                     url, e.reason)
        return

    # Cloudflare reports API-level failures in the JSON body.
    response_json = json.loads(response.read().decode("utf-8"))
    if response_json["result"] == "error":
        logger.error("Couldn't purge '%s' from Cloudflare. Cloudflare error '%s'",
                     url, response_json["msg"])
        return
def methodNext(self, previous_request, previous_response): """Retrieves the next page of results. Args: previous_request: The request for the previous page. (required) previous_response: The response from the request for the previous page. (required) Returns: A request object that you can call 'execute()' on to request the next page. Returns None if there are no more items in the collection. """ # Retrieve nextPageToken from previous_response # Use as pageToken in previous_request to create new request. if 'nextPageToken' not in previous_response: return None request = copy.copy(previous_request) pageToken = previous_response['nextPageToken'] parsed = list(urlparse(request.uri)) q = parse_qsl(parsed[4]) # Find and remove old 'pageToken' value from URI newq = [(key, value) for (key, value) in q if key != 'pageToken'] newq.append(('pageToken', pageToken)) parsed[4] = urlencode(newq) uri = urlunparse(parsed) request.uri = uri logger.info('URL being requested: %s %s' % (methodName,uri)) return request
def test_get(self):
    """
    Check the detail view and version generation. Check also renaming of files.
    """
    response = self.client.get(self.url, {'dir': self.F_IMAGE.dirname,
                                          'filename': self.F_IMAGE.filename})
    self.assertTrue(response.status_code == 200)

    # At this moment all versions should be generated. Check that.
    pre_rename_versions = []
    for version_suffix in VERSIONS:
        path = self.F_IMAGE.version_path(version_suffix)
        pre_rename_versions.append(path)
        self.assertTrue(site.storage.exists(path))

    # Attemp renaming the file
    url = '?'.join([self.url, urlencode({'dir': self.F_IMAGE.dirname,
                                         'filename': self.F_IMAGE.filename})])
    response = self.client.post(url, {'name': 'testpic.jpg'})

    # Check we get 302 response for renaming
    self.assertTrue(response.status_code == 302)

    # Check the file was renamed correctly:
    self.assertTrue(site.storage.exists(os.path.join(self.F_IMAGE.head, 'testpic.jpg')))

    # Store the renamed file
    self.F_IMAGE = FileObject(os.path.join(self.F_IMAGE.head, 'testpic.jpg'), site=site)

    # Check if all pre-rename versions were deleted:
    for path in pre_rename_versions:
        self.assertFalse(site.storage.exists(path))

    # Check if all post–rename versions were deleted (resp. not being generated):
    for version_suffix in VERSIONS:
        path = self.F_IMAGE.version_path(version_suffix)
        self.assertFalse(site.storage.exists(path))
def urlencode(self, safe=None):
    """
    Returns an encoded string of all query string arguments.

    :arg safe: Used to specify characters which do not require quoting, for
        example::

            >>> q = QueryDict('', mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
    """
    output = []
    if safe:
        # With a `safe` set, quote key and value directly so the safe
        # characters survive unescaped.
        safe = force_bytes(safe, self.encoding)
        encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
    else:
        # Otherwise delegate each pair to urlencode().
        encode = lambda k, v: urlencode({k: v})
    # Emit one key=value pair per value of each multi-valued key.
    for k, list_ in self.lists():
        k = force_bytes(k, self.encoding)
        output.extend(encode(k, force_bytes(v, self.encoding)) for v in list_)
    return '&'.join(output)
def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urls.reverse().
    """
    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = reverse('django.contrib.sitemaps.views.index')
        except NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = reverse('django.contrib.sitemaps.views.sitemap')
            except NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    # The sitemap's absolute URL is built from the current Site domain.
    if not django_apps.is_installed('django.contrib.sites'):
        raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")
    Site = django_apps.get_model('sites.Site')
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, sitemap_url)
    params = urlencode({'sitemap': url})
    urlopen("%s?%s" % (ping_url, params))
def _encode_query_params(query_params):
    """Return *query_params* as a '?'-prefixed query string.

    Mapping/pair-sequence input is URL-encoded; anything urlencode()
    rejects with TypeError (e.g. an already-encoded string) is used as-is.
    """
    try:
        encoded = urlencode(query_params)
    except TypeError:
        encoded = query_params
    return '?' + encoded
def _created_proxy_response(self, request, path):
    """Forward *request* to the upstream server and return the raw
    urllib3 response (undecoded, streaming)."""
    request_payload = request.body

    self.log.debug("Request headers: %s", self.request_headers)

    path = quote_plus(path.encode('utf8'), QUOTE_SAFE)
    request_url = self.get_upstream(path) + path
    self.log.debug("Request URL: %s", request_url)

    # Re-attach the original query string, if any.
    if request.GET:
        get_data = encode_items(request.GET.lists())
        request_url += '?' + urlencode(get_data)
        self.log.debug("Request URL: %s", request_url)

    try:
        # redirect=False / decode_content=False / preload_content=False:
        # pass the upstream response through untouched and lazily.
        proxy_response = self.http.urlopen(request.method,
                                           request_url,
                                           redirect=False,
                                           retries=self.retries,
                                           headers=self.request_headers,
                                           body=request_payload,
                                           decode_content=False,
                                           preload_content=False)
        self.log.debug("Proxy response header: %s", proxy_response.getheaders())
    except urllib3.exceptions.HTTPError as error:
        self.log.exception(error)
        raise
    return proxy_response
def next_redirect(request, fallback, **get_kwargs):
    """
    Handle the "where should I go next?" part of comment views.

    The next value could be a
    ``?next=...`` GET arg or the URL of a given view (``fallback``). See
    the view modules for examples.

    Returns an ``HttpResponseRedirect``.
    """
    next = request.POST.get('next')
    # Reject open-redirect targets; fall back to the given view/URL.
    if not is_safe_url(url=next, host=request.get_host()):
        next = resolve_url(fallback)

    if get_kwargs:
        if '#' in next:
            # Keep the fragment at the very end of the rebuilt URL.
            tmp = next.rsplit('#', 1)
            next = tmp[0]
            anchor = '#' + tmp[1]
        else:
            anchor = ''

        joiner = '&' if '?' in next else '?'
        next += joiner + urlencode(get_kwargs) + anchor
    return HttpResponseRedirect(next)
def redirect_view(request):
    """Redirect every request to the GET view, carrying over any query
    parameters."""
    query = '?' + urlencode(request.GET, True) if request.GET else ''
    return HttpResponseRedirect('/get_view/' + query)
def _build_api_url(self, path, query=None, version=None):
    """Return the URL for an API.

    By default, this uses the 2.0 API. The version can be overridden
    if the 1.0 API is needed.

    Args:
        path (unicode):
            The path relative to the root of the API.

        query (dict, optional):
            Optional query arguments for the request.

        version (unicode, optional):
            The optional custom API version to use. If not specified,
            the 2.0 API will be used.

    Returns:
        unicode:
        The absolute URL for the API.
    """
    url = 'https://bitbucket.org/api/%s/%s' % (version or '2.0', path)

    # `query=None` replaces the old mutable-default ({}) argument; the
    # falsy check below preserves the original behavior exactly.
    if query:
        url += '?%s' % urlencode(query)

    return url
def extend_qs(base_url, **kwargs):
    """
    Extend querystring of the URL with kwargs, taking care of python types.

    - True is converted to "1"
    - When a value is equal to False or None, the corresponding key is
      removed from the querystring entirely. Note that empty strings and
      numeric zeroes are not equal to False here.
    - Unicode is converted to utf-8 string
    - Everything else is converted to string using str(obj)

    For instance:

    >>> extend_qs('/foo/?a=b', c='d', e=True, f=False)
    '/foo/?a=b&c=d&e=1'
    """
    split_url = parse.urlparse(base_url)
    params = dict(parse.parse_qsl(split_url.query))
    for name, raw_value in kwargs.items():
        value = convert_to_string(raw_value)
        if value is None:
            # False/None values remove the parameter entirely.
            params.pop(name, None)
        else:
            params[name] = value
    rebuilt = list(split_url)
    rebuilt[4] = parse.urlencode(params)
    return parse.urlunparse(rebuilt)
def _generate_uri(self, limit, offset):
    """Return self.resource_uri with limit/offset replaced in the
    querystring, or None when no resource_uri is set."""
    if self.resource_uri is None:
        return None

    try:
        # QueryDict has a urlencode method that can handle multiple values for the same key
        request_params = self.request_data.copy()
        if 'limit' in request_params:
            del request_params['limit']
        if 'offset' in request_params:
            del request_params['offset']
        request_params.update({'limit': limit, 'offset': offset})
        encoded_params = request_params.urlencode()
    except AttributeError:
        # Plain-dict fallback: encode text values to UTF-8 and use urlencode().
        request_params = {}
        for k, v in self.request_data.items():
            if isinstance(v, text_type):
                request_params[k] = v.encode('utf-8')
            else:
                request_params[k] = v
        if 'limit' in request_params:
            del request_params['limit']
        if 'offset' in request_params:
            del request_params['offset']
        request_params.update({'limit': limit, 'offset': offset})
        encoded_params = urlencode(request_params)

    return '%s?%s' % (self.resource_uri, encoded_params)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    def unquote_quote(segment):
        # Unquote first so an already-quoted segment isn't double-quoted.
        segment = unquote(force_str(segment))
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
        return force_text(segment)

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)

    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)

    path = unquote_quote(path)
    fragment = unquote_quote(fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
def _build_project_api_url(self, repository, rest_parts, query=None):
    """Return an API URL for the Gerrit projects API.

    Args:
        repository (reviewboard.scmtools.models.Repository):
            The repository configured to use Gerrit.

        rest_parts (iterable):
            The rest of the URL parts.

        **query (dict, optional):
            The query parameters to append to the URL.

    Returns:
        unicode:
        The full URL.
    """
    # 'a/' prefix requests authenticated access on Gerrit.
    parts = [
        'a',
        'projects',
        quote_plus(repository.extra_data['gerrit_project_name']),
    ]
    parts.extend(rest_parts)
    url = urljoin(repository.extra_data['gerrit_url'], '/'.join(parts))

    # The URL always ends with a trailing slash before any query string.
    if query:
        url = '%s/?%s' % (url, urlencode(query))
    else:
        url = '%s/' % url

    return url
def test_accept_ok(self):
    """A correctly signed ePay.dk success redirect should move the
    payment from 'in_progress' to 'accepted_for_proc'."""
    self.test_payment.status = 'in_progress'
    self.test_payment.save()
    payproc = getpaid.backends.epaydk.PaymentProcessor(self.test_payment)
    # Parameter order matters: the hash is computed over this ordering.
    params = [
        (u'txnid', u'48384464'),
        (u'orderid', unicode(self.test_payment.id)),
        (u'amount', payproc.format_amount(self.test_payment.amount)),
        (u'currency', u'208'),
        (u'date', u'20150716'),
        (u'time', u'1638'),
        (u'txnfee', u'0'),
        (u'paymenttype', u'1'),
        (u'cardno', u'444444XXXXXX4000'),
    ]
    md5hash = payproc.compute_hash(OrderedDict(params))
    params.append(('hash', md5hash))
    query = urlencode(params)
    url = reverse('getpaid:epaydk:success') + '?' + query
    response = self.client.get(url, data=params)
    expected_url = reverse('getpaid:success-fallback',
                           kwargs=dict(pk=self.test_payment.pk))
    self.assertRedirects(response, expected_url, 302, 302)
    Payment = apps.get_model('getpaid', 'Payment')
    actual = Payment.objects.get(id=self.test_payment.id)
    self.assertEqual(actual.status, 'accepted_for_proc')
def test_get_enabled_query(self):
    """Testing the search view with a query"""
    siteconfig = SiteConfiguration.objects.get_current()
    siteconfig.set('search_enable', True)
    siteconfig.save()

    try:
        rsp = self.client.get(
            '%s?%s' % (reverse('search'), urlencode({'q': 'foo'}))
        )
    finally:
        # Always restore the site-wide setting, even if the GET fails.
        siteconfig.set('search_enable', False)
        siteconfig.save()

    self.assertEqual(rsp.status_code, 200)

    # Check for the search form.
    self.assertIn('<form method="get" action="/search/">', rsp.content)

    # And the filtered search links.
    self.assertIn('<a href="?q=foo&model_filter=reviewrequests">', rsp.content)
    self.assertIn('<a href="?q=foo&model_filter=users">', rsp.content)
def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urlresolvers.reverse().
    """
    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = urlresolvers.reverse("django.contrib.sitemaps.views.index")
        except urlresolvers.NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = urlresolvers.reverse("django.contrib.sitemaps.views.sitemap")
            except urlresolvers.NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    # The sitemap's absolute URL is built from the current Site domain.
    from django.contrib.sites.models import Site
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, sitemap_url)
    params = urlencode({"sitemap": url})
    urlopen("%s?%s" % (ping_url, params))
def test_online_ok(self):
    """A correctly signed ePay.dk 'online' callback should move the
    payment from 'accepted_for_proc' to 'paid' and answer plain 'OK'."""
    self.test_payment.status = 'accepted_for_proc'
    self.test_payment.save()
    payproc = getpaid.backends.epaydk.PaymentProcessor(self.test_payment)
    # Parameter order matters: the hash is computed over this ordering.
    params = [
        (u'txnid', u'48384464'),
        (u'orderid', unicode(self.test_payment.id)),
        (u'amount', payproc.format_amount(self.test_payment.amount)),
        (u'currency', u'208'),
        (u'date', u'20150716'),
        (u'time', u'1638'),
        (u'txnfee', u'0'),
        (u'paymenttype', u'1'),
        (u'cardno', u'444444XXXXXX4000'),
    ]
    md5hash = payproc.compute_hash(OrderedDict(params))
    params.append(('hash', md5hash))
    query = urlencode(params)
    url = reverse('getpaid:epaydk:online') + '?' + query
    response = self.client.get(url, data=params)
    self.assertEqual(response.content, b'OK')
    self.assertEqual(response.status_code, 200)
    Payment = apps.get_model('getpaid', 'Payment')
    actual = Payment.objects.get(id=self.test_payment.id)
    self.assertEqual(actual.status, 'paid')
def make_querystring(self, context):
    """Populate the template context with querystring/ordering/filter data
    derived from the active filters and sort order."""
    values = {}
    filter_values = {}
    order_values = {}
    if context['active_filters']:
        filter_values = context['active_filters']
        values.update(filter_values)
    if context['order_by']:
        order_values = {
            'sort': context['order']['order_by'],
            'direction': context['order']['type'],
        }
        if order_values['sort'][0] == '-':
            # We don't start sorting criteria with minus in querystring
            order_values['sort'] = order_values['sort'][1:]
        values.update(order_values)
    if values:
        values['redirected'] = 1
        # NOTE(review): the second positional argument of the stdlib/Django
        # urlencode is `doseq`; passing 'utf-8' here looks suspicious —
        # confirm which urlencode is imported and what this is meant to do.
        context['querystring'] = '?%s' % urlencode(values, 'utf-8')
        if order_values:
            context['query_order'] = order_values
        if filter_values:
            context['query_filters'] = filter_values
def test_do_upload(test):
    """
    Test the actual uploading
    """
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory,
                                    'qqfile': 'testimage.jpg'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        file_size = os.path.getsize(f.name)
        # Simulate the AJAX uploader's POST.
        response = test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    # Check we get OK response
    test.assertTrue(response.status_code == 200)

    # Check the file now exists
    path = os.path.join(test.tmpdir.path, 'testimage.jpg')
    test.testfile = FileObject(path, site=test.site)
    test.assertTrue(test.site.storage.exists(path))

    # Check the file has the correct size
    test.assertTrue(file_size == test.site.storage.size(path))

    # Check permissions
    if DEFAULT_PERMISSIONS is not None:
        permissions_default = oct(DEFAULT_PERMISSIONS)
        # Bug fix: the permission mask must be octal 0o777 (511), not the
        # decimal literal 777 used before; the sibling upload tests
        # already mask with 0o777.
        permissions_file = oct(os.stat(test.testfile.path_full).st_mode & 0o777)
        test.assertTrue(permissions_default == permissions_file)
def add_hipchat(request):
    """HipChat integration view: complete an install callback, or render
    the page with the install link."""
    if "installable_url" in request.GET:
        # HipChat redirects back here with the installable's URL.
        url = request.GET["installable_url"]
        assert url.startswith("https://api.hipchat.com")
        response = requests.get(url)
        if "oauthId" not in response.json():
            messages.warning(request, "Something went wrong!")
            return redirect("hc-channels")

        # Store the raw installable JSON as the channel value.
        channel = Channel(kind="hipchat")
        channel.user = request.team.user
        channel.value = response.text
        channel.save()
        channel.refresh_hipchat_access_token()
        channel.assign_all_checks()
        messages.success(request, "The HipChat integration has been added!")
        return redirect("hc-channels")

    install_url = "https://www.hipchat.com/addons/install?" + urlencode({
        "url": settings.SITE_ROOT + reverse("hc-hipchat-capabilities")
    })

    ctx = {
        "page": "channels",
        "install_url": install_url
    }
    return render(request, "integrations/add_hipchat.html", ctx)
def test_post(self):
    """Upload a test image into a subfolder via the AJAX endpoint and
    verify existence, size, and permissions of the stored file."""
    uploaded_path = os.path.join(self.F_SUBFOLDER.path, 'testimage.jpg')
    self.assertFalse(site.storage.exists(uploaded_path))

    url = '?'.join([self.url, urlencode({'folder': self.F_SUBFOLDER.path_relative_directory})])

    with open(self.STATIC_IMG_PATH, "rb") as f:
        file_size = os.path.getsize(f.name)
        response = self.client.post(url, data={'qqfile': 'testimage.jpg', 'file': f},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    # Check we get OK response
    self.assertTrue(response.status_code == 200)
    data = json.loads(response.content.decode('utf-8'))
    self.assertEqual(data["filename"], "testimage.jpg")
    self.assertEqual(data["temp_filename"], None)

    # Check the file now exists
    self.testfile = FileObject(uploaded_path, site=site)
    self.assertTrue(site.storage.exists(uploaded_path))

    # Check the file has the correct size
    self.assertTrue(file_size == site.storage.size(uploaded_path))

    # Check permissions
    # TODO: break out into separate test
    if DEFAULT_PERMISSIONS is not None:
        permissions_default = oct(DEFAULT_PERMISSIONS)
        permissions_file = oct(os.stat(self.testfile.path_full).st_mode & 0o777)
        self.assertTrue(permissions_default == permissions_file)
def get_metric_data(target, start="-5min", end="now"):
    """
    Retrieves raw datapoints from a graphite target for a given period of time.

    :param target: A metric path string or a list of multiple metric paths
    :param start: A start time specification that Graphite will accept.
    :param end: An end time specification that Graphite will accept.

    :returns: A raw, response from Graphite. Normally a list of dicts that
              represent the names and datapoints of each matched target,
              like so::

                  [{'target': 'x', 'datapoints': [(value, timestamp), ...]}]
    """
    if not target:
        return []  # no point in wasting time on http requests for no data

    base = CONFIG.get("graphiteweb", "base")
    url = urljoin(base, "/render/")

    # What does Graphite accept of formats? Lets check if the parameters are
    # datetime objects and try to force a format then
    if isinstance(start, datetime):
        start = start.strftime('%H:%M%Y%m%d')
    if isinstance(end, datetime):
        end = end.strftime('%H:%M%Y%m%d')

    query = {
        'target': target,
        'from': start,
        'until': end,
        'format': 'json',
    }
    # doseq=True so a list of targets becomes repeated 'target=' params.
    query = urlencode(query, True)
    _logger.debug("get_metric_data%r", (target, start, end))

    # NOTE(review): Request(data=...) needs bytes on Python 3; this passes
    # a str — presumably this module runs under Python 2. Confirm.
    req = Request(url, data=query)
    try:
        response = urlopen(req)
        json_data = json.load(response)
        _logger.debug("get_metric_data: returning %d results", len(json_data))
        return json_data
    except HTTPError as err:
        _logger.error("Got a 500 error from graphite-web when fetching %s"
                      "with data %s", err.url, query)
        _logger.error("Graphite output: %s", err.fp.read())
        raise errors.GraphiteUnreachableError(
            "{0} is unreachable".format(base), err)
    except URLError as err:
        raise errors.GraphiteUnreachableError(
            "{0} is unreachable".format(base), err)
    except ValueError:
        # response could not be decoded
        return []
    finally:
        # `response` is unbound when urlopen() itself failed.
        try:
            response.close()
        except NameError:
            pass
def get_gateway_url(self, request):
    """
    @see http://tech.epay.dk/en/payment-window-parameters
    @see http://tech.epay.dk/en/specification
    @see http://tech.epay.dk/en/payment-window-how-to-get-started

    `accepturl` - payment accepted for processing.
    `cancelurl` - user closed window before the payment is completed.
    `callbackurl` - is called instantly from the ePay server when the
        payment is completed.
    """
    merchantnumber = unicode(self.get_backend_setting(
        'merchantnumber', ''))
    if not merchantnumber:
        raise ImproperlyConfigured("epay.dk requires merchantnumber")

    # According to docs order ID should be a-Z 0-9. Max. 9 characters.
    # We use payment id here as we will have access to order from it.
    payment_id = unicode(self.payment.id)

    currency = unicode(
        PaymentProcessor.get_number_for_currency(self.payment.currency))

    # timeout in minutes
    timeout = unicode(self.get_backend_setting('timeout', '3'))
    instantcallback = unicode(
        self.get_backend_setting('instantcallback', '0'))

    # Insertion order matters: the hash below covers params in order.
    params = OrderedDict([
        (u'merchantnumber', merchantnumber),
        (u'orderid', payment_id),
        (u'currency', currency),
        (u'amount', PaymentProcessor.format_amount(self.payment.amount)),
        (u'windowstate', u'3'),  # 3 = Full screen
        (u'mobile', u'1'),  # 1 = autodetect
        (u'timeout', timeout),
        (u'instantcallback', instantcallback),
    ])

    # Let the project supply the user's email/language via a signal.
    user_data = {
        u'email': None,
        u'lang': None,
    }
    signals.user_data_query.send(sender=None, order=self.payment.order,
                                 user_data=user_data)

    prefered = user_data['lang'] or 'en'
    params['language'] = self._get_language_id(request, prefered=prefered)

    url_data = {
        'domain': get_domain(request=request),
        'scheme': request.scheme
    }
    params['accepturl'] = build_absolute_uri('getpaid-epaydk-success', **url_data)
    if not PaymentProcessor.get_backend_setting('callback_secret_path', ''):
        params['callbackurl'] = build_absolute_uri('getpaid-epaydk-online', **url_data)
    params['cancelurl'] = build_absolute_uri('getpaid-epaydk-failure', **url_data)

    params['hash'] = PaymentProcessor.compute_hash(params)
    url = u"{}?{}".format(self.BACKEND_GATEWAY_BASE_URL, urlencode(params))
    return (url, 'GET', {})
def encode(k, v):
    """Encode a single key/value pair into ``key=value`` query-string form."""
    return urlencode([(k, v)])
def add_pushover(request):
    """Pushover integration view: initiate, confirm, or show the form.

    POST starts the subscription flow (redirect to Pushover); a GET with
    ``pushover_user_key`` completes (or cancels) the subscription; any
    other GET renders the settings form.
    """
    if settings.PUSHOVER_API_TOKEN is None or settings.PUSHOVER_SUBSCRIPTION_URL is None:
        raise Http404("pushover integration is not available")

    if request.method == "POST":
        # Initiate the subscription
        # The session nonce guards the success callback against forgery.
        nonce = get_random_string()
        request.session["po_nonce"] = nonce

        failure_url = settings.SITE_ROOT + reverse("hc-channels")
        success_url = settings.SITE_ROOT + reverse(
            "hc-add-pushover") + "?" + urlencode(
            {
                "nonce": nonce,
                "prio": request.POST.get("po_priority", "0"),
            })
        subscription_url = settings.PUSHOVER_SUBSCRIPTION_URL + "?" + urlencode(
            {
                "success": success_url,
                "failure": failure_url,
            })

        return redirect(subscription_url)

    # Handle successful subscriptions
    if "pushover_user_key" in request.GET:
        if "nonce" not in request.GET or "prio" not in request.GET:
            return HttpResponseBadRequest()

        # Validate nonce
        if request.GET["nonce"] != request.session.get("po_nonce"):
            return HttpResponseForbidden()

        # Validate priority
        if request.GET["prio"] not in ("-2", "-1", "0", "1", "2"):
            return HttpResponseBadRequest()

        # All looks well--
        del request.session["po_nonce"]

        if request.GET.get("pushover_unsubscribed") == "1":
            # Unsubscription: delete all Pushover channels for this user
            Channel.objects.filter(user=request.user, kind="po").delete()
            return redirect("hc-channels")
        else:
            # Subscription
            user_key = request.GET["pushover_user_key"]
            priority = int(request.GET["prio"])

            # Channel value packs user key and priority into one string.
            channel = Channel(user=request.team.user, kind="po")
            channel.value = "%s|%d" % (user_key, priority)
            channel.save()
            channel.assign_all_checks()
            return redirect("hc-channels")

    # Show Integration Settings form
    ctx = {
        "page": "channels",
        "po_retry_delay": td(seconds=settings.PUSHOVER_EMERGENCY_RETRY_DELAY),
        "po_expiration": td(seconds=settings.PUSHOVER_EMERGENCY_EXPIRATION),
    }
    return render(request, "integrations/add_pushover.html", ctx)
def get(self, viewname, *args, **params):
    """GET *viewname* (reversed with *args*), passing *params* as the
    query string."""
    target = reverse(viewname, args=args)
    return self.client.get('%s?%s' % (target, urlencode(params)))
def test_default_gravatar(self):
    # The generated gravatar URL must carry the configured default-avatar
    # ('d') query parameter.
    d_param = urlencode({'d': settings.DEFAULT_AVATAR})
    assert d_param in gravatar_url(self.u.email), \
        "Bad default avatar: %s" % gravatar_url(self.u.email)
def question(request, id):  # refactor - long subroutine. display question body, answers and comments
    """view that displays body of the question and all answers to it

    Resolves permalinks to individual answers/comments (including legacy
    old-id URLs), enforces visibility rules, paginates answers and builds
    the template context for question.jinja.

    TODO: convert this view into class
    """
    # process url parameters
    # TODO: fix inheritance of sort method from questions
    form = ShowQuestionForm(
        dict(tuple(request.POST.items()) + tuple(request.GET.items())))
    form.full_clean()  # always valid
    show_answer = form.cleaned_data['show_answer']
    show_comment = form.cleaned_data['show_comment']
    show_page = form.cleaned_data['show_page']
    answer_sort_method = form.cleaned_data['answer_sort_method']

    # load question and maybe refuse showing deleted question
    # if the question does not exist - try mapping to old questions
    # and and if it is not found again - then give up
    qs = Post.objects.filter(post_type='question').select_related('thread')
    question_post = qs.filter(id=id).first()
    if question_post is None:
        # Handle URL mapping - from old Q/A/C/ URLs to the new one
        question_post = qs.filter(old_question_id=id).first()
        if question_post is None:
            raise Http404

        # old-style ids for answers/comments map to redirects as well
        if show_answer:
            try:
                old_answer = Post.objects.get_answers().get(
                    old_answer_id=show_answer)
            except Post.DoesNotExist:
                pass
            else:
                return redirect(old_answer)
        elif show_comment:
            try:
                old_comment = Post.objects.get_comments().get(
                    old_comment_id=show_comment)
            except Post.DoesNotExist:
                pass
            else:
                return redirect(old_comment)

    if show_comment or show_answer:
        try:
            show_post = Post.objects.get(pk=(show_comment or show_answer))
        except Post.DoesNotExist:
            # missing target post will be handled later
            pass
        else:
            # permalink type does not match the actual post type - bounce
            # to the post's canonical URL
            if (show_comment and not show_post.is_comment()) or \
                    (show_answer and not show_post.is_answer()):
                return redirect(show_post)

    try:
        question_post.assert_is_visible_to(request.user)
    except exceptions.QuestionHidden as error:
        traceback.print_exc()
        # request.user.message_set.create(message=force_text(error))
        django_messages.info(request, force_text(error))
        return redirect('index')

    # redirect if slug in the url is wrong
    if request.path.split('/')[-2] != question_post.slug:
        logging.debug('no slug match!')
        lang = translation.get_language()
        question_url = question_post.get_absolute_url(language=lang)
        if request.GET:
            question_url += '?' + urlencode(request.GET)
        return redirect(question_url)

    # resolve comment and answer permalinks
    # they go first because in theory both can be moved to another question
    # this block "returns" show_post and assigns actual comment and answer
    # to show_comment and show_answer variables
    # in the case if the permalinked items or their parents are gone - redirect
    # redirect also happens if id of the object's origin post != requested id
    show_post = None  # used for permalinks
    if show_comment:
        # if url calls for display of a specific comment,
        # check that comment exists, that it belongs to
        # the current question
        # if it is an answer comment and the answer is hidden -
        # redirect to the default view of the question
        # if the question is hidden - redirect to the main page
        # in addition - if url points to a comment and the comment
        # is for the answer - we need the answer object
        try:
            show_comment = Post.objects.get_comments().get(id=show_comment)
        except Post.DoesNotExist:
            traceback.print_exc()
            error_message = _(
                'Sorry, the comment you are looking for has been '
                'deleted and is no longer accessible')
            # request.user.message_set.create(message=error_message)
            django_messages.info(request, error_message)
            return redirect(question_post.thread)

        # comment was moved to a different question
        if str(show_comment.thread._question_post().id) != str(id):
            return redirect(show_comment)

        show_post = show_comment.parent

        try:
            show_comment.assert_is_visible_to(request.user)
        except exceptions.AnswerHidden as e:
            traceback.print_exc()
            # request.user.message_set.create(message=force_text(e))
            django_messages.info(request, force_text(e))
            # use reverse function here because question is not yet loaded
            return redirect('question', id=id)
        except exceptions.QuestionHidden as e:
            traceback.print_exc()
            # request.user.message_set.create(message=force_text(e))
            django_messages.info(request, force_text(e))
            return redirect('index')

    elif show_answer:
        # if the url calls to view a particular answer to
        # question - we must check whether the question exists
        # whether answer is actually corresponding to the current question
        # and that the visitor is allowed to see it
        show_post = get_object_or_404(Post, post_type='answer', id=show_answer)
        if str(show_post.thread._question_post().id) != str(id):
            return redirect(show_post)

        try:
            show_post.assert_is_visible_to(request.user)
        except django_exceptions.PermissionDenied as e:
            traceback.print_exc()
            # request.user.message_set.create(message=force_text(e))
            django_messages.info(request, force_text(e))
            return redirect('question', id=id)

    thread = question_post.thread
    logging.debug('answer_sort_method=' + force_text(answer_sort_method))

    # load answers and post id's->athor_id mapping
    # posts are pre-stuffed with the correctly ordered comments
    question_post, answers, post_to_author, published_answer_ids = \
        thread.get_post_data_for_question_view(
            sort_method=answer_sort_method, user=request.user)

    user_votes = {}
    user_post_id_list = list()
    # TODO: cache this query set, but again takes only 3ms!
    if request.user.is_authenticated():
        user_votes = Vote.objects.\
            filter(user=request.user,
                   voted_post__id__in=post_to_author.keys()).\
            values_list('voted_post_id', 'vote')
        user_votes = dict(user_votes)
        # we can avoid making this query by iterating through
        # already loaded posts
        user_post_id_list = [
            post_id for post_id in post_to_author
            if post_to_author[post_id] == request.user.id
        ]

    # resolve page number and comment number for permalinks
    show_comment_position = None
    if show_comment:
        show_page = show_comment.get_page_number(answer_posts=answers)
        show_comment_position = show_comment.get_order_number()
    elif show_answer:
        show_page = show_post.get_page_number(answer_posts=answers)

    objects_list = Paginator(answers, const.ANSWERS_PAGE_SIZE)
    if show_page > objects_list.num_pages:
        return redirect(question_post)
    page_objects = objects_list.page(show_page)

    # count visits
    signals.question_visited.send(None, request=request,
                                  question=question_post)

    paginator_data = {
        'is_paginated': (objects_list.count > const.ANSWERS_PAGE_SIZE),
        'pages': objects_list.num_pages,
        'current_page_number': show_page,
        'page_object': page_objects,
        'base_url': request.path + '?sort=%s&' % answer_sort_method,
    }
    paginator_context = functions.setup_paginator(paginator_data)

    # TODO: maybe consolidate all activity in the thread
    # for the user into just one query?
    favorited = thread.has_favorite_by_user(request.user)

    # only page 1 with few comments is considered cacheable
    is_cacheable = True
    if show_page != 1:
        is_cacheable = False
    elif (show_comment_position or 0) > askbot_settings.MAX_COMMENTS_TO_SHOW:
        is_cacheable = False

    # maybe load draft
    initial = {}
    if request.user.is_authenticated():
        # TODO: refactor into methor on thread
        drafts = DraftAnswer.objects.filter(author=request.user,
                                            thread=thread)
        if drafts.count() > 0:
            initial['text'] = drafts[0].text

    # allow a project-specific answer form class via settings
    custom_answer_form_path = getattr(django_settings,
                                      'ASKBOT_NEW_ANSWER_FORM', None)
    if custom_answer_form_path:
        answer_form_class = load_module(custom_answer_form_path)
    else:
        answer_form_class = AnswerForm

    answer_form = answer_form_class(initial=initial, user=request.user)

    user_can_post_comment = (request.user.is_authenticated()
                             and request.user.can_post_comment(question_post))

    new_answer_allowed = True
    previous_answer = None
    if request.user.is_authenticated():
        if askbot_settings.LIMIT_ONE_ANSWER_PER_USER:
            for answer in answers:
                if answer.author_id == request.user.pk:
                    new_answer_allowed = False
                    previous_answer = answer
                    break

    if request.user.is_authenticated() and askbot_settings.GROUPS_ENABLED:
        group_read_only = request.user.is_read_only()
    else:
        group_read_only = False

    data = {
        'active_tab': 'questions',
        'answer': answer_form,
        'answers': page_objects.object_list,
        'answer_count': thread.get_answer_count(request.user),
        'blank_comment': MockPost(post_type='comment', author=request.user),  # data for the js comment template
        'category_tree_data': askbot_settings.CATEGORY_TREE,
        'editor_is_unfolded': answer_form.has_data(),
        'favorited': favorited,
        'group_read_only': group_read_only,
        'is_cacheable': False,  # is_cacheable, # temporary, until invalidation fix
        'language_code': translation.get_language(),
        'long_time': const.LONG_TIME,  # "forever" caching
        'new_answer_allowed': new_answer_allowed,
        'oldest_answer_id': thread.get_oldest_answer_id(request.user),
        'page_class': 'question-page',
        'paginator_context': paginator_context,
        'previous_answer': previous_answer,
        'published_answer_ids': published_answer_ids,
        'question': question_post,
        'show_comment': show_comment,
        'show_comment_position': show_comment_position,
        'show_post': show_post,
        'similar_threads': thread.get_similar_threads(),
        'tab_id': answer_sort_method,
        'thread': thread,
        'thread_is_moderated': thread.is_moderated(),
        'user_is_thread_moderator': thread.has_moderator(request.user),
        'user_votes': user_votes,
        'user_post_id_list': user_post_id_list,
        'user_can_post_comment': user_can_post_comment,  # in general
    }

    # shared with ...
    if askbot_settings.GROUPS_ENABLED:
        data['sharing_info'] = thread.get_sharing_info()

    data.update(context.get_for_tag_editor())

    extra = context.get_extra('ASKBOT_QUESTION_PAGE_EXTRA_CONTEXT',
                              request, data)
    data.update(extra)

    return render(request, 'question.jinja', data)
def get_pagination_context(page, pages_to_show=11, url=None, size=None, extra=None, parameter_name='page'):
    """
    Generate Bootstrap pagination context from a page object
    """
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError(
            "Pagination pages_to_show should be a positive integer, you specified {pages}"
            .format(pages=pages_to_show))

    num_pages = page.paginator.num_pages
    current_page = page.number
    # Half the window width; pages_to_show >= 1, so this is never negative.
    half = pages_to_show // 2

    # Window start, clamped to the first page.
    start = max(current_page - half, 1)

    # "Back" jump target exists only when the window does not begin at page 1.
    back = max(start - half, 1) if start > 1 else None

    # Window end; widen by one when there is no back jump, clamp to the last page.
    stop = start + pages_to_show - 1
    if back is None:
        stop += 1
    stop = min(stop, num_pages)

    # "Forward" jump target exists only when the window does not reach the end.
    forward = min(stop + half, num_pages) if stop < num_pages else None

    # Shift window one step left so the first page becomes reachable,
    # dropping the back jump if it would land on page 1.
    if start > 1:
        start -= 1
        if back is not None and back > 1:
            back -= 1
        else:
            back = None

    shown = list(range(start, stop + 1))

    # Rebuild the url, merging any extra querystring parameters into it.
    parts = urlparse(url or '')
    params = parse_qs(parts.query)
    if extra:
        params.update(parse_qs(extra))
    url = urlunparse([
        parts.scheme, parts.netloc, parts.path, parts.params,
        urlencode(params, doseq=True), parts.fragment
    ])

    # Set CSS classes, see http://getbootstrap.com/components/#pagination
    size_class = {'small': 'pagination-sm', 'large': 'pagination-lg'}.get(size)
    css_classes = ['pagination']
    if size_class:
        css_classes.append(size_class)

    return {
        'bootstrap_pagination_url': url,
        'num_pages': num_pages,
        'current_page': current_page,
        'first_page': start,
        'last_page': stop,
        'pages_shown': shown,
        'pages_back': back,
        'pages_forward': forward,
        'pagination_css_classes': ' '.join(css_classes),
        'parameter_name': parameter_name,
    }
def test_convert_normalize(test):
    """
    Test the uploading with CONVERT_FILENAME, NORMALIZE_FILENAME

    Uploads the same image repeatedly while toggling the
    CONVERT_FILENAME/NORMALIZE_FILENAME/OVERWRITE_EXISTING settings and
    checks the resulting file names and directory entry counts.
    """
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([
        url,
        urlencode({
            'folder': test.tmpdir.path_relative_directory,
            'qqfile': 'TEST IMAGE 000.jpg'
        })
    ])
    f = open(
        os.path.join(FILEBROWSER_PATH,
                     'static/filebrowser/img/TEST IMAGE 000.jpg'), "rb")

    # Save settings
    oe = filebrowser.sites.OVERWRITE_EXISTING
    cf = filebrowser.sites.CONVERT_FILENAME
    nf = filebrowser.sites.NORMALIZE_FILENAME

    # Set CONVERT_FILENAME, NORMALIZE_FILENAME: both off
    filebrowser.sites.CONVERT_FILENAME = False
    filebrowser.sites.NORMALIZE_FILENAME = False
    filebrowser.utils.CONVERT_FILENAME = False
    filebrowser.utils.NORMALIZE_FILENAME = False

    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 3)

    # OVERWRITE true: the existing file is replaced, no "_1" copy appears
    filebrowser.sites.OVERWRITE_EXISTING = True
    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000_1.jpg')
    test.assertFalse(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 3)

    # OVERWRITE false: a new suffixed copy is created
    filebrowser.sites.OVERWRITE_EXISTING = False
    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 4)

    # Set CONVERT_FILENAME, NORMALIZE_FILENAME: convert only
    filebrowser.sites.CONVERT_FILENAME = True
    filebrowser.sites.NORMALIZE_FILENAME = False
    filebrowser.utils.CONVERT_FILENAME = True
    filebrowser.utils.NORMALIZE_FILENAME = False

    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 5)

    # OVERWRITE true
    filebrowser.sites.OVERWRITE_EXISTING = True
    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 5)

    # OVERWRITE false
    # NOTE(review): 'TTEST IMAGE 000.jpg' (double T) looks like a typo for
    # 'TEST IMAGE 000.jpg' -- confirm against upstream before changing, as
    # the expected directory count below depends on it.
    filebrowser.sites.OVERWRITE_EXISTING = False
    test.c.post(url,
                data={
                    'qqfile': 'TTEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 6)

    # Set CONVERT_FILENAME, NORMALIZE_FILENAME: both on
    filebrowser.sites.CONVERT_FILENAME = True
    filebrowser.sites.NORMALIZE_FILENAME = True
    filebrowser.utils.CONVERT_FILENAME = True
    filebrowser.utils.NORMALIZE_FILENAME = True

    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 7)

    # OVERWRITE true
    filebrowser.sites.OVERWRITE_EXISTING = True
    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 7)

    # OVERWRITE false
    filebrowser.sites.OVERWRITE_EXISTING = False
    test.c.post(url,
                data={
                    'qqfile': 'TEST IMAGE 000.jpg',
                    'file': f
                },
                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
    test.assertTrue(test.site.storage.exists(path))
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 8)

    # Close the upload fixture (it was previously leaked)
    f.close()

    # Reset settings
    filebrowser.sites.CONVERT_FILENAME = cf
    filebrowser.sites.NORMALIZE_FILENAME = nf
    filebrowser.utils.CONVERT_FILENAME = cf
    filebrowser.utils.NORMALIZE_FILENAME = nf
    filebrowser.sites.OVERWRITE_EXISTING = oe
def test_overwrite(test): """ Test the uploading with OVERWRITE_EXISTING """ # Save settings oe = filebrowser.sites.OVERWRITE_EXISTING # OVERWRITE true filebrowser.sites.OVERWRITE_EXISTING = True url = reverse('%s:fb_do_upload' % test.site_name) url = '?'.join([ url, urlencode({ 'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg' }) ]) with open( os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f: # file_size = os.path.getsize(f.name) test.c.post(url, data={ 'qqfile': 'testimage.jpg', 'file': f }, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # Check files test.assertEqual(test.site.storage.listdir(test.tmpdir), ([], ['testimage.jpg'])) # OVERWRITE false filebrowser.sites.OVERWRITE_EXISTING = False url = reverse('%s:fb_do_upload' % test.site_name) url = '?'.join([ url, urlencode({ 'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg' }) ]) with open( os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f: # file_size = os.path.getsize(f.name) test.c.post(url, data={ 'qqfile': 'testimage.jpg', 'file': f }, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # Check files test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 2) # Reset settings filebrowser.sites.OVERWRITE_EXISTING = oe
def translate(request, document_slug, document_locale):
    """
    Create a new translation of a wiki document.

    * document_slug is for the default locale
    * translation is to the request locale

    Handles both GET (render the translate form, pre-filled from the
    parent document or an existing translation) and POST (save the
    Document and/or Revision forms, depending on the 'form-type' field).
    """
    # TODO: Refactor this view into two views? (new, edit)
    # That might help reduce the headache-inducing branchiness.

    # The parent document to translate from
    parent_doc = get_object_or_404(Document,
                                   locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug=document_slug)

    # HACK: Seems weird, but sticking the translate-to locale in a query
    # param is the best way to avoid the MindTouch-legacy locale
    # redirection logic.
    document_locale = request.GET.get('tolocale', document_locale)

    # Set a "Discard Changes" page
    discard_href = ''

    if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
        # Don't translate to the default language.
        return redirect(reverse('wiki.edit',
                                locale=settings.WIKI_DEFAULT_LANGUAGE,
                                args=[parent_doc.slug]))

    if not parent_doc.is_localizable:
        message = _(u'You cannot translate this document.')
        context = {'message': message}
        return render(request, 'handlers/400.html', context, status=400)

    based_on_rev = parent_doc.current_or_latest_revision()

    disclose_description = bool(request.GET.get('opendescription'))

    try:
        doc = parent_doc.translations.get(locale=document_locale)
        slug_dict = split_slug(doc.slug)
    except Document.DoesNotExist:
        # No translation yet - start a fresh one based on the parent
        doc = None
        disclose_description = True
        slug_dict = split_slug(document_slug)

        # Find the "real" parent topic, which is its translation
        if parent_doc.parent_topic:
            try:
                parent_topic_translated_doc = (
                    parent_doc.parent_topic.translations.get(
                        locale=document_locale))
                slug_dict = split_slug(
                    parent_topic_translated_doc.slug + '/' +
                    slug_dict['specific'])
            except ObjectDoesNotExist:
                pass

    user_has_doc_perm = (not doc) or (doc and
                                      doc.allows_editing_by(request.user))

    doc_form = None
    if user_has_doc_perm:
        if doc:
            # If there's an existing doc, populate form from it.
            discard_href = doc.get_absolute_url()
            doc.slug = slug_dict['specific']
            doc_initial = document_form_initial(doc)
        else:
            # If no existing doc, bring over the original title and slug.
            discard_href = parent_doc.get_absolute_url()
            doc_initial = {
                'title': based_on_rev.title,
                'slug': slug_dict['specific']
            }
        doc_form = DocumentForm(initial=doc_initial,
                                parent_slug=slug_dict['parent'])

    initial = {
        'based_on': based_on_rev.id,
        'current_rev': doc.current_or_latest_revision().id if doc else None,
        'comment': '',
        'toc_depth': based_on_rev.toc_depth,
        'localization_tags': ['inprogress'],
    }
    content = None
    if not doc:
        # Seed a new translation with the parent's (sanitized) content
        content = based_on_rev.content
    if content:
        initial.update(content=kuma.wiki.content.parse(
            content).filterEditorSafety().serialize())
    instance = doc and doc.current_or_latest_revision()
    rev_form = RevisionForm(request=request,
                            instance=instance,
                            initial=initial,
                            parent_slug=slug_dict['parent'])

    if request.method == 'POST':
        which_form = request.POST.get('form-type', 'both')
        doc_form_invalid = False

        # Grab the posted slug value in case it's invalid
        posted_slug = request.POST.get('slug', slug_dict['specific'])

        if user_has_doc_perm and which_form in ['doc', 'both']:
            disclose_description = True
            post_data = request.POST.copy()
            post_data.update({'locale': document_locale})

            doc_form = DocumentForm(post_data, instance=doc,
                                    parent_slug=slug_dict['parent'])
            doc_form.instance.locale = document_locale
            doc_form.instance.parent = parent_doc

            if which_form == 'both':
                # Sending a new copy of post so the slug change above
                # doesn't cause problems during validation
                rev_form = RevisionForm(request=request,
                                        data=post_data,
                                        parent_slug=slug_dict['parent'])

            # If we are submitting the whole form, we need to check that
            # the Revision is valid before saving the Document.
            if doc_form.is_valid() and (which_form == 'doc' or
                                        rev_form.is_valid()):
                doc = doc_form.save(parent=parent_doc)

                if which_form == 'doc':
                    url = urlparams(doc.get_edit_url(), opendescription=1)
                    return redirect(url)
            else:
                doc_form.data['slug'] = posted_slug
                doc_form_invalid = True

        if doc and which_form in ['rev', 'both']:
            post_data = request.POST.copy()
            if 'slug' not in post_data:
                post_data['slug'] = posted_slug

            # update the post data with the toc_depth of original
            post_data['toc_depth'] = based_on_rev.toc_depth

            # Pass in the locale for the akistmet "blog_lang".
            post_data['locale'] = document_locale

            rev_form = RevisionForm(request=request, data=post_data,
                                    parent_slug=slug_dict['parent'])
            rev_form.instance.document = doc  # for rev_form.clean()

            if rev_form.is_valid() and not doc_form_invalid:
                parent_id = request.POST.get('parent_id', '')

                # Attempt to set a parent
                if parent_id:
                    try:
                        parent_doc = get_object_or_404(Document, id=parent_id)
                        rev_form.instance.document.parent = parent_doc
                        doc.parent = parent_doc
                        rev_form.instance.based_on.document = doc.original
                    except Document.DoesNotExist:
                        pass

                rev_form.save(doc)

                # If this is an Ajax POST, then return a JsonResponse
                if request.is_ajax():
                    data = {
                        'error': False,
                        'new_revision_id': rev_form.instance.id,
                    }
                    return JsonResponse(data)

                # Construct the redirect URL, adding any needed parameters
                url = doc.get_absolute_url()
                params = {}
                # Parameter for the document saved, so that we can delete the cached draft on load
                params['rev_saved'] = request.POST.get('current_rev', '')
                url = '%s?%s' % (url, urlencode(params))
                return redirect(url)
            else:
                # If this is an Ajax POST, then return a JsonResponse with error
                if request.is_ajax():
                    if 'current_rev' in rev_form._errors:
                        # Make the error message safe so the '<' and '>' don't
                        # get turned into '&lt;' and '&gt;', respectively
                        rev_form.errors['current_rev'][0] = mark_safe(
                            rev_form.errors['current_rev'][0])
                    errors = [rev_form.errors[key][0]
                              for key in rev_form.errors.keys()]
                    data = {
                        "error": True,
                        "error_message": errors,
                        "new_revision_id": rev_form.instance.id,
                    }
                    return JsonResponse(data=data)

    if doc:
        # Optional revision range for the diff shown alongside the form
        from_id = smart_int(request.GET.get('from'), None)
        to_id = smart_int(request.GET.get('to'), None)

        revision_from = get_object_or_none(Revision,
                                           pk=from_id,
                                           document=doc.parent)
        revision_to = get_object_or_none(Revision,
                                         pk=to_id,
                                         document=doc.parent)
    else:
        revision_from = revision_to = None

    parent_split = split_slug(parent_doc.slug)

    language_mapping = get_language_mapping()
    language = language_mapping[document_locale.lower()]
    default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]

    context = {
        'parent': parent_doc,
        'document': doc,
        'document_form': doc_form,
        'revision_form': rev_form,
        'locale': document_locale,
        'default_locale': default_locale,
        'language': language,
        'based_on': based_on_rev,
        'disclose_description': disclose_description,
        'discard_href': discard_href,
        'attachment_form': AttachmentRevisionForm(),
        'specific_slug': parent_split['specific'],
        'parent_slug': parent_split['parent'],
        'revision_from': revision_from,
        'revision_to': revision_to,
    }
    return render(request, 'wiki/translate.html', context)
def get_url(self):
    """Return the flavor URL with this volume's id as a query parameter."""
    base = self.flavor_id.get_url()
    query = urlencode({'volume_id': self.vol_id.id})
    return '?'.join([base, query])
def get_login_url(**query):
    """Return the login URL with *query* encoded as its query string."""
    return '?'.join([reverse('login'), parse.urlencode(query)])
def create_graph_url(self):
    """Create url for getting a graph from Graphite"""
    query = urlencode(self.graph_args, True)
    return "%s?%s" % (reverse("graphite-render"), query)
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets.

    The payload is normalized once, then encoded per target (custom
    template, JSON, XML, or form data), signed with the target's secret
    when one is set, and POSTed to the target URL. Encoded bodies are
    cached per encoding so each format is only built once.

    Args:
        request (django.http.HttpRequest):
            The HTTP request from the client.

        webhook_targets (list of
                         reviewboard.notifications.models.WebHookTarget):
            The list of WebHook targets containing endpoint URLs to
            dispatch to.

        event (unicode):
            The name of the event being dispatched.

        payload (dict):
            The payload data to encode for the WebHook payload.

    Raises:
        ValueError:
            There was an error with the payload format. Details are in the
            log and the exception message.
    """
    try:
        payload = normalize_webhook_payload(payload, request)
    except TypeError as e:
        logging.exception('WebHook payload passed to dispatch_webhook_event '
                          'containing invalid data types: %s',
                          e)
        raise ValueError(six.text_type(e))

    encoder = BasicAPIEncoder()
    bodies = {}

    for webhook_target in webhook_targets:
        if webhook_target.use_custom_content:
            try:
                body = render_custom_content(webhook_target.custom_content,
                                             payload)
                body = body.encode('utf-8')
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            encoding = webhook_target.encoding

            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                        body = body.encode('utf-8')
                    elif encoding == webhook_target.ENCODING_XML:
                        # NOTE(review): unlike the JSON and form-data
                        # branches, the XML body is not encoded to bytes
                        # here -- confirm whether len()/hmac below can
                        # receive a text body in this branch.
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(payload,
                                                      request=request),
                        })
                        body = body.encode('utf-8')
                    else:
                        logging.error('Unexpected WebHookTarget encoding '
                                      '"%s" for ID %s',
                                      encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            b'Content-Length': len(body),
            b'User-Agent': ('ReviewBoard-WebHook/%s'
                            % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # HMAC-SHA1 signature over the body, GitHub-style
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url
            url_parts = urlsplit(url)

            if url_parts.username or url_parts.password:
                # Strip credentials out of the URL and move them into an
                # HTTP Basic Auth handler instead.
                # NOTE(review): urlsplit() results have no .params
                # attribute (that is urlparse()); this line looks like it
                # raises AttributeError, swallowed by the except below --
                # confirm.
                netloc = url_parts.netloc.split('@', 1)[1]
                url = urlunsplit(
                    (url_parts.scheme, netloc, url_parts.path,
                     url_parts.params, url_parts.query))

                password_mgr = HTTPPasswordMgrWithDefaultRealm()
                password_mgr.add_password(
                    None, url, url_parts.username, url_parts.password)
                handler = HTTPBasicAuthHandler(password_mgr)
                opener = build_opener(handler)
            else:
                opener = build_opener()

            opener.open(Request(url.encode('utf-8'), body, headers))
        except Exception as e:
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets.

    Payload normalization is done lazily and at most twice: once keeping
    the original keys (for XML and custom templates) and once with all
    keys coerced to strings (for JSON and form data). Encoded bodies are
    cached per encoding, each body is optionally HMAC-SHA1 signed with
    the target's secret, and then POSTed to the target URL.

    Args:
        request (django.http.HttpRequest):
            The HTTP request from the client.

        webhook_targets (list of
                         reviewboard.notifications.models.WebHookTarget):
            The list of WebHook targets containing endpoint URLs to
            dispatch to.

        event (unicode):
            The name of the event being dispatched.

        payload (dict):
            The payload data to encode for the WebHook payload.

    Raises:
        ValueError:
            There was an error with the payload format. Details are in the
            log and the exception message.
    """
    encoder = BasicAPIEncoder()
    bodies = {}

    raw_norm_payload = None
    json_norm_payload = None

    for webhook_target in webhook_targets:
        use_custom_content = webhook_target.use_custom_content
        encoding = webhook_target.encoding

        # See how we need to handle normalizing this payload. If we need
        # something JSON-safe, then we need to go the more aggressive route
        # and normalize keys to strings.
        if raw_norm_payload is None or json_norm_payload is None:
            try:
                if (raw_norm_payload is None and
                    (use_custom_content or
                     encoding == webhook_target.ENCODING_XML)):
                    # This payload's going to be provided for XML and custom
                    # templates. We don't want to alter the keys at all.
                    raw_norm_payload = normalize_webhook_payload(
                        payload=payload,
                        request=request)
                elif (json_norm_payload is None and
                      not use_custom_content and
                      encoding in (webhook_target.ENCODING_JSON,
                                   webhook_target.ENCODING_FORM_DATA)):
                    # This payload's going to be provided for JSON or
                    # form-data. We want to normalize all keys to strings.
                    json_norm_payload = normalize_webhook_payload(
                        payload=payload,
                        request=request,
                        use_string_keys=True)
            except TypeError as e:
                logging.exception(
                    'WebHook payload passed to '
                    'dispatch_webhook_event containing invalid '
                    'data types: %s',
                    e)

                raise ValueError(six.text_type(e))

        if use_custom_content:
            try:
                assert raw_norm_payload is not None
                body = render_custom_content(webhook_target.custom_content,
                                             raw_norm_payload)
                body = force_bytes(body)
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        assert json_norm_payload is not None
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(json_norm_payload,
                                              request=request)
                    elif encoding == webhook_target.ENCODING_XML:
                        assert raw_norm_payload is not None
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(raw_norm_payload,
                                              request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        assert json_norm_payload is not None
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(json_norm_payload,
                                                      request=request),
                        })
                    else:
                        logging.error('Unexpected WebHookTarget encoding '
                                      '"%s" for ID %s',
                                      encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                body = force_bytes(body)
                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            b'Content-Length': len(body),
            b'User-Agent': ('ReviewBoard-WebHook/%s'
                            % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # HMAC-SHA1 signature over the body, GitHub-style
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url
            url_parts = urlsplit(url)

            if url_parts.username or url_parts.password:
                # Strip credentials out of the URL and move them into an
                # HTTP Basic Auth handler instead.
                # NOTE(review): urlsplit() results have no .params
                # attribute (that is urlparse()); this line looks like it
                # raises AttributeError, swallowed by the except below --
                # confirm.
                netloc = url_parts.netloc.split('@', 1)[1]
                url = urlunsplit((url_parts.scheme, netloc, url_parts.path,
                                  url_parts.params, url_parts.query))

                password_mgr = HTTPPasswordMgrWithDefaultRealm()
                password_mgr.add_password(None, url, url_parts.username,
                                          url_parts.password)
                handler = HTTPBasicAuthHandler(password_mgr)
                opener = build_opener(handler)
            else:
                opener = build_opener()

            opener.open(Request(url, body, headers))
        except Exception as e:
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)
def _get_geocode_url(address):
    """Build the geocoder request URL for *address*."""
    # The geocoder expects raw UTF-8 bytes for the address value.
    if isinstance(address, text_type):
        address = address.encode('utf8')
    query = {'geocode': address, 'format': 'json', 'results': 1}
    return GEOCODE_URL + urlencode(query)
def test_something(self, urlopen):
    """ping_google must call the webmasters ping endpoint with our sitemap."""
    ping_google()
    sitemap = 'http://example.com/sitemap-without-entries/sitemap.xml'
    expected = 'https://www.google.com/webmasters/tools/ping?%s' % urlencode(
        {'sitemap': sitemap})
    urlopen.assert_called_with(expected)
def get_encoded_query_params(request):
    """Return encoded query params to be used in proxied request."""
    # Fix: the summary above was previously a '#' comment with a dangling
    # '"""' — restored as a proper docstring.
    get_data = encode_items(request.GET.lists())
    return urlencode(get_data)
def login(self):
    """Authenticate against the site and install a cookie-aware opener globally."""
    credentials = urlencode({'username': self.username, 'password': self.password})
    opener = build_opener(HTTPCookieProcessor())
    # POST the credentials so the opener's cookie jar picks up the session.
    opener.open('%sindex/login/' % self.base_url, credentials.encode('utf-8'), TIMEOUT)
    install_opener(opener)
def redirect_to_self_with_changing_query_view(request):
    """Redirect back to this view with the ``counter`` query value grown by a trailing '0'."""
    params = request.GET.copy()
    params['counter'] += '0'
    target = '/redirect_to_self_with_changing_query_view/?%s' % urlencode(params)
    return HttpResponseRedirect(target)
def __str__(self):
    """Render as the graphite render URL carrying the stored query arguments."""
    base = reverse("graphite-render")
    query = urlencode(self.args, True)
    return "%s?%s" % (base, query)
def get_pagination_context(
    page,
    pages_to_show=11,
    url=None,
    size=None,
    justify_content=None,
    extra=None,
    parameter_name="page",
):
    """Generate Bootstrap pagination context from a page object"""
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError(
            "Pagination pages_to_show should be a positive integer, you specified {pages}"
            .format(pages=pages_to_show)
        )

    num_pages = page.paginator.num_pages
    current_page = page.number
    # Half the window width, used to center the window on the current page.
    half_span = max(int(floor(pages_to_show / 2)), 0)

    first_page = max(current_page - half_span, 1)
    if first_page > 1:
        pages_back = max(first_page - half_span, 1)
    else:
        pages_back = None

    last_page = first_page + pages_to_show - 1
    if pages_back is None:
        # No back-jump target: widen the window by one page on the right.
        last_page += 1
    last_page = min(last_page, num_pages)

    if last_page < num_pages:
        pages_forward = min(last_page + half_span, num_pages)
    else:
        pages_forward = None

    # Shift the window one page left when it does not start at page 1,
    # adjusting (or dropping) the back-jump target to match.
    if first_page > 1:
        first_page -= 1
    if pages_back is not None and pages_back > 1:
        pages_back -= 1
    else:
        pages_back = None

    pages_shown = list(range(first_page, last_page + 1))

    # Rebuild the base URL, merging any extra querystring parameters.
    parts = urlparse(url or "")
    params = parse_qs(parts.query)
    if extra:
        params.update(parse_qs(extra))
    url = urlunparse([
        parts.scheme,
        parts.netloc,
        parts.path,
        parts.params,
        urlencode(params, doseq=True),
        parts.fragment,
    ])

    # Set CSS classes, see http://getbootstrap.com/components/#pagination
    css_classes = ["pagination"]
    if size == "small":
        css_classes.append("pagination-sm")
    elif size == "large":
        css_classes.append("pagination-lg")

    if justify_content == "start":
        css_classes.append("justify-content-start")
    elif justify_content == "center":
        css_classes.append("justify-content-center")
    elif justify_content == "end":
        css_classes.append("justify-content-end")

    return {
        "bootstrap_pagination_url": url,
        "num_pages": num_pages,
        "current_page": current_page,
        "first_page": first_page,
        "last_page": last_page,
        "pages_shown": pages_shown,
        "pages_back": pages_back,
        "pages_forward": pages_forward,
        "pagination_css_classes": " ".join(css_classes),
        "parameter_name": parameter_name,
    }
def _encode_query_params(query_params): try: query_params = urlencode(query_params) except TypeError: pass return '?' + query_params
def edit(request, document_slug, document_locale):
    """
    Create a new revision of a wiki document, or edit document metadata.

    Handles three flows:

    * GET: render the edit page (revision form, plus the document-metadata
      form when the user may edit the document).
    * POST with form-type 'doc': save document metadata and redirect back
      to the edit page.
    * POST with form-type 'rev': save a new revision, handling Ajax
      submissions, mid-air collisions and spam rejections along the way.
    """
    doc = get_object_or_404(Document,
                            locale=document_locale,
                            slug=document_slug)

    # If this document has a parent, then the edit is handled by the
    # translate view. Pass it on.
    if doc.parent and doc.parent.id != doc.id:
        return translate(request, doc.parent.slug, doc.locale,
                         bypass_process_document_path=True)

    # Prefer the current revision; fall back to the newest one by
    # creation date, then id.
    rev = doc.current_revision or doc.revisions.order_by('-created', '-id')[0]

    # Keep hold of the full post slug
    slug_dict = split_slug(document_slug)
    # Update the slug, removing the parent path, and
    # *only* using the last piece.
    # This is only for the edit form.
    rev.slug = slug_dict['specific']

    section_id = request.GET.get('section', None)
    if section_id and not request.is_ajax():
        return HttpResponse(ugettext("Sections may only be edited inline."))
    disclose_description = bool(request.GET.get('opendescription'))

    doc_form = rev_form = None

    rev_form = RevisionForm(request=request,
                            instance=rev,
                            initial={'based_on': rev.id,
                                     'current_rev': rev.id,
                                     'comment': ''},
                            section_id=section_id)
    if doc.allows_editing_by(request.user):
        doc_form = DocumentForm(initial=document_form_initial(doc))

    # Need to make check *here* to see if this could have a translation parent
    show_translation_parent_block = (
        (document_locale != settings.WIKI_DEFAULT_LANGUAGE) and
        (not doc.parent_id))

    if request.method == 'GET':
        if not (rev_form or doc_form):
            # You can't do anything on this page, so get lost.
            raise PermissionDenied
    else:  # POST
        is_async_submit = request.is_ajax()
        is_raw = request.GET.get('raw', False)
        need_edit_links = request.GET.get('edit_links', False)
        parent_id = request.POST.get('parent_id', '')

        # Attempt to set a parent
        if show_translation_parent_block and parent_id:
            try:
                parent_doc = get_object_or_404(Document, id=parent_id)
                doc.parent = parent_doc
            except Document.DoesNotExist:
                pass

        # Comparing against localized names for the Save button bothers me, so
        # I embedded a hidden input:
        which_form = request.POST.get('form-type')

        if which_form == 'doc':
            if doc.allows_editing_by(request.user):
                post_data = request.POST.copy()
                post_data.update({'locale': document_locale})
                doc_form = DocumentForm(post_data, instance=doc)
                if doc_form.is_valid():
                    # if must be here for section edits
                    if 'slug' in post_data:
                        post_data['slug'] = u'/'.join([slug_dict['parent'],
                                                       post_data['slug']])

                    # Get the possibly new slug for the imminent redirection:
                    doc = doc_form.save(parent=None)

                    return redirect(urlparams(doc.get_edit_url(),
                                              opendescription=1))
                # Invalid metadata: fall through and re-render with the
                # description form open.
                disclose_description = True
            else:
                raise PermissionDenied

        elif which_form == 'rev':
            post_data = request.POST.copy()

            rev_form = RevisionForm(request=request,
                                    data=post_data,
                                    is_async_submit=is_async_submit,
                                    section_id=section_id)
            rev_form.instance.document = doc  # for rev_form.clean()

            # Come up with the original revision to which these changes
            # would be applied.
            orig_rev_id = request.POST.get('current_rev', False)
            if orig_rev_id is False:
                orig_rev = None
            else:
                orig_rev = Revision.objects.get(pk=orig_rev_id)
            # Get the document's actual current revision.
            curr_rev = doc.current_revision

            if not rev_form.is_valid():
                # If this was an Ajax POST, then return a JsonResponse
                if is_async_submit:
                    # Was there a mid-air collision?
                    if 'current_rev' in rev_form._errors:
                        # Make the error message safe so the '<' and '>' in it
                        # don't get escaped into HTML entities on output.
                        rev_form.errors['current_rev'][0] = mark_safe(
                            rev_form.errors['current_rev'][0])
                    errors = [rev_form.errors[key][0]
                              for key in rev_form.errors.keys()]
                    data = {
                        "error": True,
                        "error_message": errors,
                        "new_revision_id": curr_rev.id,
                    }
                    return JsonResponse(data=data)
                # Jump out to a function to escape indentation hell
                return _edit_document_collision(request, orig_rev, curr_rev,
                                                is_async_submit, is_raw,
                                                rev_form, doc_form,
                                                section_id, rev, doc)

            # Was this an Ajax submission that was marked as spam?
            if is_async_submit and '__all__' in rev_form._errors:
                # Return a JsonResponse
                data = {
                    "error": True,
                    "error_message": mark_safe(rev_form.errors['__all__'][0]),
                    "new_revision_id": curr_rev.id,
                }
                return JsonResponse(data=data)

            if rev_form.is_valid():
                rev_form.save(doc)

                if (is_raw and orig_rev is not None and
                        curr_rev.id != orig_rev.id):
                    # If this is the raw view, and there was an original
                    # revision, but the original revision differed from the
                    # current revision at start of editing, we should tell
                    # the client to refresh the page.
                    response = HttpResponse('RESET')
                    response['X-Frame-Options'] = 'SAMEORIGIN'
                    response.status_code = 205
                    return response

                # Is this an Ajax POST?
                if is_async_submit:
                    # This is the most recent revision id
                    new_rev_id = rev.document.revisions.order_by('-id').first().id
                    data = {"error": False, "new_revision_id": new_rev_id}
                    return JsonResponse(data)

                if rev_form.instance.is_approved:
                    view = 'wiki.document'
                else:
                    view = 'wiki.document_revisions'

                # Construct the redirect URL, adding any needed parameters
                url = reverse(view, args=[doc.slug], locale=doc.locale)
                params = {}
                if is_raw:
                    params['raw'] = 'true'
                    if need_edit_links:
                        # Only need to carry over ?edit_links with ?raw,
                        # because they're on by default in the normal UI
                        params['edit_links'] = 'true'
                    if section_id:
                        # If a section was edited, and we're using the raw
                        # content API, constrain to that section.
                        params['section'] = section_id
                # Parameter for the document saved, so that we can delete
                # the cached draft on load
                params['rev_saved'] = curr_rev.id if curr_rev else ''
                url = '%s?%s' % (url, urlencode(params))
                if not is_raw and section_id:
                    # If a section was edited, jump to the section anchor
                    # if we're not getting raw content.
                    url = '%s#%s' % (url, section_id)
                return redirect(url)

    # GET, or a POST that fell through without redirecting: render the
    # edit page with whatever forms (and errors) we have.
    parent_path = parent_slug = ''
    if slug_dict['parent']:
        parent_slug = slug_dict['parent']
    if doc.parent_topic_id:
        parent_doc = Document.objects.get(pk=doc.parent_topic_id)
        parent_path = parent_doc.get_absolute_url()
        parent_slug = parent_doc.slug

    context = {
        'revision_form': rev_form,
        'document_form': doc_form,
        'section_id': section_id,
        'disclose_description': disclose_description,
        'parent_slug': parent_slug,
        'parent_path': parent_path,
        'revision': rev,
        'document': doc,
        'attachment_form': AttachmentRevisionForm(),
    }
    return render(request, 'wiki/edit.html', context)
def dispatch_webhook_event(request, webhook_targets, event, payload):
    """Dispatch the given event and payload to the given WebHook targets.

    For each target the payload is rendered either through the target's
    custom content template or through one of the supported encoders
    (JSON, XML or form data).  Encoded bodies are cached per encoding so
    the payload is serialized at most once per distinct encoding.  If the
    target defines a secret, the request carries an HMAC-SHA1 signature
    of the body in the X-Hub-Signature header.  Failures for one target
    are logged and do not prevent delivery to the remaining targets.
    """
    encoder = ResourceAPIEncoder()

    # Encoded bodies keyed by encoding, shared across all targets.
    bodies = {}

    for webhook_target in webhook_targets:
        if webhook_target.use_custom_content:
            # Custom-content bodies are rendered per target and are never
            # cached in `bodies`.
            try:
                body = render_custom_content(webhook_target.custom_content,
                                             payload)
                body = body.encode('utf-8')
            except Exception as e:
                logging.exception('Could not render WebHook payload: %s', e)
                continue
        else:
            encoding = webhook_target.encoding

            if encoding not in bodies:
                try:
                    if encoding == webhook_target.ENCODING_JSON:
                        adapter = JSONEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                        body = body.encode('utf-8')
                    elif encoding == webhook_target.ENCODING_XML:
                        # NOTE(review): unlike the JSON and form-data
                        # branches, this body is not UTF-8 encoded here --
                        # presumably the XML adapter returns bytes; confirm.
                        adapter = XMLEncoderAdapter(encoder)
                        body = adapter.encode(payload, request=request)
                    elif encoding == webhook_target.ENCODING_FORM_DATA:
                        # Form data wraps the JSON encoding in a single
                        # url-encoded `payload` field.
                        adapter = JSONEncoderAdapter(encoder)
                        body = urlencode({
                            'payload': adapter.encode(payload,
                                                      request=request),
                        })
                        body = body.encode('utf-8')
                    else:
                        logging.error(
                            'Unexpected WebHookTarget encoding "%s" '
                            'for ID %s',
                            encoding, webhook_target.pk)
                        continue
                except Exception as e:
                    logging.exception('Could not encode WebHook payload: %s',
                                      e)
                    continue

                bodies[encoding] = body
            else:
                body = bodies[encoding]

        headers = {
            b'X-ReviewBoard-Event': event.encode('utf-8'),
            b'Content-Type': webhook_target.encoding.encode('utf-8'),
            # NOTE(review): Content-Length is an int while the other header
            # values are bytes -- confirm the HTTP layer accepts this.
            b'Content-Length': len(body),
            b'User-Agent': ('ReviewBoard-WebHook/%s'
                            % get_package_version()).encode('utf-8'),
        }

        if webhook_target.secret:
            # Sign the body so the receiver can verify its authenticity.
            signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
                              hashlib.sha1)
            headers[b'X-Hub-Signature'] = \
                ('sha1=%s' % signer.hexdigest()).encode('utf-8')

        logging.info('Dispatching webhook for event %s to %s',
                     event, webhook_target.url)

        try:
            url = webhook_target.url.encode('utf-8')
            urlopen(Request(url, body, headers))
        except Exception as e:
            # Best-effort delivery: log and move on to the next target.
            logging.exception('Could not dispatch WebHook to %s: %s',
                              webhook_target.url, e)