def test_mixed_types_rejected(self):
    # Several functions that process either strings or ASCII encoded bytes
    # accept multiple arguments. Check they reject mixed type input
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlparse("www.python.org", b"http")
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlparse(b"www.python.org", "http")
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlsplit("www.python.org", b"http")
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlsplit(b"www.python.org", "http")
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlunparse(
            (b"http", "www.python.org", "", "", "", ""))
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlunparse(
            ("http", b"www.python.org", "", "", "", ""))
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlunsplit((b"http", "www.python.org", "", "", ""))
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urlunsplit(("http", b"www.python.org", "", "", ""))
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urljoin("http://python.org", b"http://python.org")
    with self.assertRaisesRegex(TypeError, "Cannot mix str"):
        urllib_parse.urljoin(b"http://python.org", "http://python.org")
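
# A minimal standalone sketch (not part of the test suite) of the behaviour the
# test above relies on: the stdlib urllib.parse refuses to combine str and bytes
# arguments and raises TypeError("Cannot mix str and non-str arguments"). Assumes
# plain CPython 3; the tests use the name `urllib_parse`, which is presumably a
# compatibility alias for the same module.
from urllib import parse as urllib_parse

try:
    urllib_parse.urljoin("http://python.org", b"/download")
except TypeError as exc:
    print(exc)  # -> Cannot mix str and non-str arguments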
def _get_encoded_url(self):
    """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
    sres = urllib_parse.urlsplit(self.file_path)
    return urllib_parse.urlunsplit(
        urllib_parse.SplitResult(sres.scheme, sres.netloc, urllib_parse.quote(sres.path),
                                 sres.query, sres.fragment))
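
# A minimal standalone sketch of the same idea as _get_encoded_url above, using only
# the stdlib: split the URL, percent-encode the path, and reassemble it so the result
# is pure ASCII. The example URL is made up for illustration.
from urllib import parse as urllib_parse

file_path = "https://api.example.org/file/bot123/фото.jpg"  # hypothetical URL
sres = urllib_parse.urlsplit(file_path)
encoded = urllib_parse.urlunsplit(
    urllib_parse.SplitResult(sres.scheme, sres.netloc, urllib_parse.quote(sres.path),
                             sres.query, sres.fragment))
print(encoded)  # https://api.example.org/file/bot123/%D1%84%D0%BE%D1%82%D0%BE.jpg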
def checkRoundtrips(self, url, parsed, split):
    result = urllib_parse.urlparse(url)
    self.assertEqual(result, parsed)
    t = (result.scheme, result.netloc, result.path,
         result.params, result.query, result.fragment)
    self.assertEqual(t, parsed)

    # put it back together and it should be the same
    result2 = urllib_parse.urlunparse(result)
    self.assertEqual(result2, url)
    self.assertEqual(result2, result.geturl())

    # the result of geturl() is a fixpoint; we can always parse it
    # again to get the same result:
    result3 = urllib_parse.urlparse(result.geturl())
    self.assertEqual(result3.geturl(), result.geturl())
    self.assertEqual(result3, result)
    self.assertEqual(result3.scheme, result.scheme)
    self.assertEqual(result3.netloc, result.netloc)
    self.assertEqual(result3.path, result.path)
    self.assertEqual(result3.params, result.params)
    self.assertEqual(result3.query, result.query)
    self.assertEqual(result3.fragment, result.fragment)
    self.assertEqual(result3.username, result.username)
    self.assertEqual(result3.password, result.password)
    self.assertEqual(result3.hostname, result.hostname)
    self.assertEqual(result3.port, result.port)

    # check the roundtrip using urlsplit() as well
    result = urllib_parse.urlsplit(url)
    self.assertEqual(result, split)
    t = (result.scheme, result.netloc, result.path,
         result.query, result.fragment)
    self.assertEqual(t, split)
    result2 = urllib_parse.urlunsplit(result)
    self.assertEqual(result2, url)
    self.assertEqual(result2, result.geturl())

    # check the fixpoint property of re-parsing the result of geturl()
    result3 = urllib_parse.urlsplit(result.geturl())
    self.assertEqual(result3.geturl(), result.geturl())
    self.assertEqual(result3, result)
    self.assertEqual(result3.scheme, result.scheme)
    self.assertEqual(result3.netloc, result.netloc)
    self.assertEqual(result3.path, result.path)
    self.assertEqual(result3.query, result.query)
    self.assertEqual(result3.fragment, result.fragment)
    self.assertEqual(result3.username, result.username)
    self.assertEqual(result3.password, result.password)
    self.assertEqual(result3.hostname, result.hostname)
    self.assertEqual(result3.port, result.port)
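
# A minimal standalone illustration (assumed inputs) of the round-trip property the
# helper above asserts: urlunparse(urlparse(url)) reproduces the URL, and geturl()
# is a fixpoint under re-parsing.
from urllib import parse as urllib_parse

url = "http://user:pass@www.example.com:80/path;params?query=1#frag"
parsed = urllib_parse.urlparse(url)
assert urllib_parse.urlunparse(parsed) == url
assert urllib_parse.urlparse(parsed.geturl()).geturl() == parsed.geturl()
print(parsed.hostname, parsed.port)  # www.example.com 80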
def get_or_post(uri, method, req, content_type=DEFAULT_POST_CONTENT_TYPE,
                accept=None, **kwargs):
    """
    Construct the path, body and request keyword arguments for an HTTP request.

    :param uri: The request URI
    :param method: The HTTP method ("GET", "DELETE", "POST" or "PUT")
    :param req: The request message; for GET/DELETE its parameters are merged
        into the URI query component, for POST/PUT it becomes the request body
    :param content_type: Content type of the body for POST/PUT requests
    :param accept: Value for the Accept header, if any
    :param kwargs: Extra keyword arguments passed on to the HTTP request
    :return: Tuple of (path, body, kwargs)
    """
    if method in ["GET", "DELETE"]:
        if req.keys():
            _req = req.copy()
            comp = urlsplit(str(uri))
            if comp.query:
                _req.update(parse_qs(comp.query))
            _query = str(_req.to_urlencoded())
            path = urlunsplit(
                (comp.scheme, comp.netloc, comp.path, _query, comp.fragment))
        else:
            path = uri
        body = None
    elif method in ["POST", "PUT"]:
        path = uri
        if content_type == URL_ENCODED:
            body = req.to_urlencoded()
        elif content_type == JSON_ENCODED:
            body = req.to_json()
        else:
            raise UnSupported("Unsupported content type: '%s'" % content_type)

        header_ext = {"Content-Type": content_type}
        if accept:
            # Add the Accept header without discarding the Content-Type set above.
            header_ext["Accept"] = accept

        if "headers" in kwargs:
            kwargs["headers"].update(header_ext)
        else:
            kwargs["headers"] = header_ext
    else:
        raise UnSupported("Unsupported HTTP method: '%s'" % method)

    return path, body, kwargs
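
# A minimal stdlib-only sketch of the GET branch above: merge request parameters with
# any query already present in the URI and reassemble the URL. The real code relies on
# an oic Message object (req.to_urlencoded()); here a plain dict plus urlencode stands
# in for it, so this is an approximation, not the library's implementation. The URI
# and parameters are hypothetical.
from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

uri = "https://op.example.com/authorize?state=abc"
req = {"response_type": "code", "client_id": "client123"}

comp = urlsplit(uri)
merged = dict(req)
if comp.query:
    merged.update(parse_qs(comp.query))
path = urlunsplit((comp.scheme, comp.netloc, comp.path,
                   urlencode(merged, doseq=True), comp.fragment))
print(path)
# https://op.example.com/authorize?response_type=code&client_id=client123&state=abc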
def test_unparse_parse(self):
    str_cases = [
        'Python',
        './Python',
        'x-newscheme://foo.com/stuff',
        'x://y',
        'x:/y',
        'x:/',
        '/',
    ]
    bytes_cases = [x.encode('ascii') for x in str_cases]
    for u in str_cases + bytes_cases:
        self.assertEqual(urllib_parse.urlunsplit(urllib_parse.urlsplit(u)), u)
        self.assertEqual(urllib_parse.urlunparse(urllib_parse.urlparse(u)), u)
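
# A small standalone check (assumed inputs) that the round-trip in the test above also
# holds for bytes input: urlsplit/urlunsplit accept ASCII bytes and give bytes back.
from urllib import parse as urllib_parse

for u in ("x-newscheme://foo.com/stuff", b"x-newscheme://foo.com/stuff"):
    assert urllib_parse.urlunsplit(urllib_parse.urlsplit(u)) == u
    assert urllib_parse.urlunparse(urllib_parse.urlparse(u)) == u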
def download(self, custom_path=None, out=None, timeout=None):
    """
    Download this file. By default, the file is saved in the current working directory with its
    original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
    saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
    that object using the ``out.write`` method.

    Note:
        `custom_path` and `out` are mutually exclusive.

    Args:
        custom_path (:obj:`str`, optional): Custom path.
        out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
            applicable.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).

    Raises:
        ValueError: If both ``custom_path`` and ``out`` are passed.
    """
    if custom_path is not None and out is not None:
        raise ValueError('custom_path and out are mutually exclusive')

    # Convert any UTF-8 char into a url encoded ASCII string.
    sres = urllib_parse.urlsplit(self.file_path)
    url = urllib_parse.urlunsplit(
        urllib_parse.SplitResult(sres.scheme, sres.netloc, urllib_parse.quote(sres.path),
                                 sres.query, sres.fragment))

    if out:
        buf = self.bot.request.retrieve(url)
        out.write(buf)
    else:
        if custom_path:
            filename = custom_path
        else:
            filename = basename(self.file_path)

        self.bot.request.download(url, filename, timeout=timeout)
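
# A hedged usage sketch for the download() method above, assuming it belongs to a
# python-telegram-bot style File object obtained via bot.get_file(); the token and
# file_id are placeholders, so this only runs against a real bot and file.
import telegram

bot = telegram.Bot(token="TOKEN")           # hypothetical bot token
new_file = bot.get_file("FILE_ID")          # hypothetical file_id from an update
new_file.download(custom_path="photo.jpg")  # saved to ./photo.jpg

# Alternatively, stream the contents into an already-open binary file object:
with open("photo_copy.jpg", "wb") as fobj:
    new_file.download(out=fobj)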
def generate_return_url(self, return_to, uid, path=""): """ :param return_to: If it starts with '/' it's an absolute path otherwise a relative path. :param uid: :param path: The verify path """ if return_to.startswith("http"): up = urlsplit(return_to) _path = up.path else: up = None _path = return_to if not _path.startswith("/"): p = path.split("/") p[-1] = _path _path = "/".join(p) if up: _path = urlunsplit([up[0], up[1], _path, up[3], up[4]]) return create_return_url(_path, uid, **{self.query_param: "true"})