from urllib.parse import quote as urlquote  # assumed alias, common in similar codebases


def _sanitize_untrusted_url(response, url):
    if isinstance(url, bytes):
        url = url.decode('utf8', 'replace')
    # Reject anything that isn't a local absolute path: full URLs
    # ("https://evil.example/") and protocol-relative URLs ("//evil.example/")
    # would both redirect the user off-site.
    if not url.startswith('/') or url.startswith('//'):
        url = '/?bad_redirect=' + urlquote(url)
    host = response.request.headers[b'Host'].decode('ascii')
    # ^ this is safe because we don't accept requests with unknown hosts
    return response.website.canonical_scheme + '://' + host + url
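A quick sketch of the behavior, using `SimpleNamespace` stand-ins for the framework's request and response objects (the stand-ins and the example host are assumptions for illustration, not part of the original code):

from types import SimpleNamespace

_fake_response = SimpleNamespace(
    request=SimpleNamespace(headers={b'Host': b'example.net'}),
    website=SimpleNamespace(canonical_scheme='https'),
)

# A local path is passed through, anchored to the known host:
assert _sanitize_untrusted_url(_fake_response, '/dashboard') == \
    'https://example.net/dashboard'
# A protocol-relative URL is defanged instead of followed:
assert _sanitize_untrusted_url(_fake_response, '//evil.example/') == \
    'https://example.net/?bad_redirect=//evil.example/'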
import os
from ipaddress import ip_network
from time import time
from urllib.parse import quote as urlquote
from urllib.request import urlretrieve

# `env` and `mkdir_p` come from the surrounding application and aren't shown here.

def parse_network(net):
    if net == 'private':
        # The string is returned as-is; presumably the caller treats it specially.
        return [net]
    elif net.startswith('https://'):
        d = env.log_dir + '/trusted_proxies/'
        mkdir_p(d)
        filename = d + urlquote(net, '')
        # Reuse the cached list if it's non-empty and less than a week old.
        skip_download = (
            os.path.exists(filename) and
            os.stat(filename).st_size > 0 and
            os.stat(filename).st_mtime > time() - 60*60*24*7
        )
        if not skip_download:
            urlretrieve(net, filename)
        with open(filename, 'rb') as f:
            return [ip_network(x) for x in f.read().decode('ascii').strip().split()]
    else:
        return [ip_network(net)]
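The version above hands the cache path straight to `urlretrieve`, which writes directly to it: if the download dies partway through, a truncated but non-empty file can be left behind, and the freshness check will then trust it for up to a week. The revision below avoids that by writing to a temporary file and only renaming it into place once the download has completed, so the cache file is either absent or whole.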
import requests
from tempfile import mkstemp
# (other imports as in the previous version)

def parse_network(net):
    if net == 'private':
        return [net]
    elif net.startswith('https://'):
        d = env.log_dir + '/trusted_proxies/'
        mkdir_p(d)
        filename = d + urlquote(net, '')
        skip_download = (
            os.path.exists(filename) and
            os.stat(filename).st_size > 0 and
            os.stat(filename).st_mtime > time() - 60*60*24*7
        )
        if not skip_download:
            # Download into a temp file in the same directory, then atomically
            # rename it over the cache file once the body is fully written.
            tmpfd, tmp_path = mkstemp(dir=d)
            with open(tmpfd, 'w') as f:
                f.write(requests.get(net).text)
            os.rename(tmp_path, filename)
        with open(filename, 'rb') as f:
            return [ip_network(x) for x in f.read().decode('ascii').strip().split()]
    else:
        return [ip_network(net)]
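The returned list mixes the literal string 'private' with `ip_network` objects, so a caller has to handle both. A sketch of how such a list might be consumed (the config string and the `is_trusted_proxy` helper are illustrative assumptions, not code from the original):

from ipaddress import ip_address

trusted_proxies = [
    network
    for token in 'private 10.0.0.0/8'.split()
    for network in parse_network(token)
]

def is_trusted_proxy(addr):
    """Return True if `addr` falls inside any configured trusted network."""
    ip = ip_address(addr)
    return any(
        ip.is_private if network == 'private' else ip in network
        for network in trusted_proxies
    )

print(is_trusted_proxy('10.8.0.1'))  # True: inside 10.0.0.0/8
print(is_trusted_proxy('8.8.8.8'))   # False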
import string

# `maybe_encode` is a helper from the surrounding codebase, not shown here.

def _encode_url(url):
    # Treat all ASCII punctuation as safe, so only non-ASCII characters and
    # whitespace get percent-encoded.
    return maybe_encode(urlquote(url, string.punctuation))
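Since `maybe_encode` isn't shown, a plausible stand-in (an assumption, mirroring str-to-bytes helpers of that name in similar codebases) makes the snippet runnable and shows the effect: an already-valid URL passes through untouched apart from UTF-8 percent-encoding.

def maybe_encode(s):
    return s.encode('utf8') if isinstance(s, str) else s

print(_encode_url('https://example.net/café?x=1'))
# b'https://example.net/caf%C3%A9?x=1'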