Example #1
 def test_mixed_types_rejected(self):
     # Several functions that process either strings or ASCII encoded bytes
     # accept multiple arguments. Check that they reject mixed-type input.
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(
             (b"http", "www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(
             ("http", b"www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit((b"http", "www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit(("http", b"www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin("http://python.org", b"http://python.org")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin(b"http://python.org", "http://python.org")
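The str/bytes separation being tested here is easy to reproduce interactively. A minimal sketch against the standard library's urllib.parse (which the urllib_parse alias used above appears to wrap):

from urllib.parse import urljoin, urlparse

# Pure str or pure bytes input is fine; each returns a result of the same type.
print(urlparse("http://www.python.org/path"))    # ParseResult with str fields
print(urlparse(b"http://www.python.org/path"))   # ParseResultBytes with bytes fields

# Mixing the two raises TypeError("Cannot mix str and non-str arguments").
try:
    urljoin("http://python.org", b"/index.html")
except TypeError as exc:
    print(exc)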
Example #2
    def checkRoundtrips(self, url, parsed, split):
        result = urllib_parse.urlparse(url)
        self.assertEqual(result, parsed)
        t = (result.scheme, result.netloc, result.path,
             result.params, result.query, result.fragment)
        self.assertEqual(t, parsed)
        # put it back together and it should be the same
        result2 = urllib_parse.urlunparse(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())

        # the result of geturl() is a fixpoint; we can always parse it
        # again to get the same result:
        result3 = urllib_parse.urlparse(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.params,   result.params)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)

        # check the roundtrip using urlsplit() as well
        result = urllib_parse.urlsplit(url)
        self.assertEqual(result, split)
        t = (result.scheme, result.netloc, result.path,
             result.query, result.fragment)
        self.assertEqual(t, split)
        result2 = urllib_parse.urlunsplit(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())

        # check the fixpoint property of re-parsing the result of geturl()
        result3 = urllib_parse.urlsplit(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)
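The properties this helper asserts can be reproduced directly with the standard library; a minimal standalone sketch, with an illustrative URL chosen so that all six urlparse components are non-empty:

from urllib.parse import urlparse, urlsplit, urlunparse, urlunsplit

url = "http://www.python.org/doc/;params?query=yes#frag"

parsed = urlparse(url)          # 6-tuple: scheme, netloc, path, params, query, fragment
assert urlunparse(parsed) == url == parsed.geturl()

split = urlsplit(url)           # 5-tuple: scheme, netloc, path, query, fragment
assert urlunsplit(split) == url == split.geturl()

# Re-parsing geturl() is a fixpoint: it yields an equal result object.
assert urlparse(parsed.geturl()) == parsed
assert urlsplit(split.geturl()) == split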
Example #3
 def test_issue14072(self):
     p1 = urllib_parse.urlsplit('tel:+31-641044153')
     self.assertEqual(p1.scheme, 'tel')
     self.assertEqual(p1.path, '+31-641044153')
     p2 = urllib_parse.urlsplit('tel:+31641044153')
     self.assertEqual(p2.scheme, 'tel')
     self.assertEqual(p2.path, '+31641044153')
     # assert the behavior for urlparse
     p1 = urllib_parse.urlparse('tel:+31-641044153')
     self.assertEqual(p1.scheme, 'tel')
     self.assertEqual(p1.path, '+31-641044153')
     p2 = urllib_parse.urlparse('tel:+31641044153')
     self.assertEqual(p2.scheme, 'tel')
     self.assertEqual(p2.path, '+31641044153')
Example #4
    def checkRoundtrips(self, url, parsed, split):
        result = urllib_parse.urlparse(url)
        self.assertEqual(result, parsed)
        t = (result.scheme, result.netloc, result.path,
             result.params, result.query, result.fragment)
        self.assertEqual(t, parsed)
        # put it back together and it should be the same
        result2 = urllib_parse.urlunparse(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())

        # the result of geturl() is a fixpoint; we can always parse it
        # again to get the same result:
        result3 = urllib_parse.urlparse(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.params,   result.params)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)

        # check the roundtrip using urlsplit() as well
        result = urllib_parse.urlsplit(url)
        self.assertEqual(result, split)
        t = (result.scheme, result.netloc, result.path,
             result.query, result.fragment)
        self.assertEqual(t, split)
        result2 = urllib_parse.urlunsplit(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())

        # check the fixpoint property of re-parsing the result of geturl()
        result3 = urllib_parse.urlsplit(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)
Example #5
 def test_issue14072(self):
     p1 = urllib_parse.urlsplit('tel:+31-641044153')
     self.assertEqual(p1.scheme, 'tel')
     self.assertEqual(p1.path, '+31-641044153')
     p2 = urllib_parse.urlsplit('tel:+31641044153')
     self.assertEqual(p2.scheme, 'tel')
     self.assertEqual(p2.path, '+31641044153')
     # assert the behavior for urlparse
     p1 = urllib_parse.urlparse('tel:+31-641044153')
     self.assertEqual(p1.scheme, 'tel')
     self.assertEqual(p1.path, '+31-641044153')
     p2 = urllib_parse.urlparse('tel:+31641044153')
     self.assertEqual(p2.scheme, 'tel')
     self.assertEqual(p2.path, '+31641044153')
Example #6
 def _get_encoded_url(self):
     """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
     sres = urllib_parse.urlsplit(self.file_path)
     return urllib_parse.urlunsplit(
         urllib_parse.SplitResult(sres.scheme, sres.netloc,
                                  urllib_parse.quote(sres.path), sres.query,
                                  sres.fragment))
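A short sketch of what that re-quoting does, using the standard library and a made-up URL with a non-ASCII path segment:

from urllib.parse import SplitResult, quote, urlsplit, urlunsplit

file_path = "https://api.example.org/files/r\u00e9sum\u00e9.pdf"   # hypothetical URL

sres = urlsplit(file_path)
encoded = urlunsplit(SplitResult(sres.scheme, sres.netloc,
                                 quote(sres.path), sres.query, sres.fragment))
print(encoded)  # https://api.example.org/files/r%C3%A9sum%C3%A9.pdf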
Example #7
    def test_attributes_bad_port(self):
        """Check handling of non-integer ports."""
        p = urllib_parse.urlsplit("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        p = urllib_parse.urlparse("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        # Once again, repeat ourselves to test bytes
        p = urllib_parse.urlsplit(b"http://www.example.net:foo")
        self.assertEqual(p.netloc, b"www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        p = urllib_parse.urlparse(b"http://www.example.net:foo")
        self.assertEqual(p.netloc, b"www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)
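With the standard library's urllib.parse, accessing .port on such a result also raises ValueError on recent CPython versions; a quick sketch:

from urllib.parse import urlsplit

p = urlsplit("http://www.example.net:foo")
print(p.netloc)            # 'www.example.net:foo' -- the netloc is kept verbatim
try:
    p.port                 # the port property parses lazily and rejects 'foo'
except ValueError as exc:
    print(exc)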
Example #8
    def test_attributes_bad_port(self):
        """Check handling of non-integer ports."""
        p = urllib_parse.urlsplit("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        p = urllib_parse.urlparse("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        # Once again, repeat ourselves to test bytes
        p = urllib_parse.urlsplit(b"http://www.example.net:foo")
        self.assertEqual(p.netloc, b"www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

        p = urllib_parse.urlparse(b"http://www.example.net:foo")
        self.assertEqual(p.netloc, b"www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)
Example #9
    def test_attributes_without_netloc(self):
        # This example is straight from RFC 3261.  It looks like it
        # should allow the username, hostname, and port to be filled
        # in, but doesn't.  Since it's a URI and doesn't use the
        # scheme://netloc syntax, the netloc and related attributes
        # should be left empty.
        uri = "sip:[email protected];maddr=239.255.255.1;ttl=15"
        p = urllib_parse.urlsplit(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        p = urllib_parse.urlparse(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        # You guessed it, repeating the test with bytes input
        uri = b"sip:[email protected];maddr=239.255.255.1;ttl=15"
        p = urllib_parse.urlsplit(uri)
        self.assertEqual(p.netloc, b"")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        p = urllib_parse.urlparse(uri)
        self.assertEqual(p.netloc, b"")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)
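The same distinction shows up with any scheme that omits the '//' authority marker; a small sketch with the standard library:

from urllib.parse import urlsplit

# No '//' after the scheme, so there is no authority component at all:
p = urlsplit("mailto:user@example.com")
print(p.netloc, p.hostname, p.port)    # ''  None  None
print(p.path)                          # 'user@example.com'

# With '//' the same text becomes a netloc and is decomposed further:
q = urlsplit("//user@example.com:8080/inbox")
print(q.username, q.hostname, q.port)  # 'user'  'example.com'  8080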
Example #10
    def test_attributes_without_netloc(self):
        # This example is straight from RFC 3261.  It looks like it
        # should allow the username, hostname, and port to be filled
        # in, but doesn't.  Since it's a URI and doesn't use the
        # scheme://netloc syntax, the netloc and related attributes
        # should be left empty.
        uri = "sip:[email protected];maddr=239.255.255.1;ttl=15"
        p = urllib_parse.urlsplit(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        p = urllib_parse.urlparse(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        # You guessed it, repeating the test with bytes input
        uri = b"sip:[email protected];maddr=239.255.255.1;ttl=15"
        p = urllib_parse.urlsplit(uri)
        self.assertEqual(p.netloc, b"")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

        p = urllib_parse.urlparse(uri)
        self.assertEqual(p.netloc, b"")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)
Example #11
def get_or_post(uri,
                method,
                req,
                content_type=DEFAULT_POST_CONTENT_TYPE,
                accept=None,
                **kwargs):
    """

    :param uri:
    :param method:
    :param req:
    :param content_type:
    :param accept:
    :param kwargs:
    :return:
    """
    if method in ["GET", "DELETE"]:
        if req.keys():
            _req = req.copy()
            comp = urlsplit(str(uri))
            if comp.query:
                _req.update(parse_qs(comp.query))

            _query = str(_req.to_urlencoded())
            path = urlunsplit(
                (comp.scheme, comp.netloc, comp.path, _query, comp.fragment))
        else:
            path = uri
        body = None
    elif method in ["POST", "PUT"]:
        path = uri
        if content_type == URL_ENCODED:
            body = req.to_urlencoded()
        elif content_type == JSON_ENCODED:
            body = req.to_json()
        else:
            raise UnSupported("Unsupported content type: '%s'" % content_type)

        header_ext = {"Content-Type": content_type}
        if accept:
            header_ext = {"Accept": accept}

        if "headers" in kwargs.keys():
            kwargs["headers"].update(header_ext)
        else:
            kwargs["headers"] = header_ext
    else:
        raise UnSupported("Unsupported HTTP method: '%s'" % method)

    return path, body, kwargs
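A standalone sketch of the query-merging pattern in the GET/DELETE branch above, using only the standard library. The endpoint URL and the access_token parameter are made up, and a plain dict plus urlencode stands in for the req message object:

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

uri = "https://provider.example.com/userinfo?scope=openid"   # hypothetical endpoint
req = {"access_token": "abc123"}                              # stand-in for the request message

comp = urlsplit(uri)
if comp.query:
    req.update(parse_qs(comp.query))       # merge the query already present in the URI

query = urlencode(req, doseq=True)
path = urlunsplit((comp.scheme, comp.netloc, comp.path, query, comp.fragment))
print(path)   # https://provider.example.com/userinfo?access_token=abc123&scope=openid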
Example #12
 def test_mixed_types_rejected(self):
     # Several functions that process either strings or ASCII encoded bytes
     # accept multiple arguments. Check that they reject mixed-type input.
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlparse(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit("www.python.org", b"http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlsplit(b"www.python.org", "http")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse((b"http", "www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunparse(("http", b"www.python.org", "", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit((b"http", "www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urlunsplit(("http", b"www.python.org", "", "", ""))
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin("http://python.org", b"http://python.org")
     with self.assertRaisesRegex(TypeError, "Cannot mix str"):
         urllib_parse.urljoin(b"http://python.org", "http://python.org")
Example #13
 def test_unparse_parse(self):
     str_cases = [
         'Python',
         './Python',
         'x-newscheme://foo.com/stuff',
         'x://y',
         'x:/y',
         'x:/',
         '/',
     ]
     bytes_cases = [x.encode('ascii') for x in str_cases]
     for u in str_cases + bytes_cases:
         self.assertEqual(urllib_parse.urlunsplit(urllib_parse.urlsplit(u)),
                          u)
         self.assertEqual(urllib_parse.urlunparse(urllib_parse.urlparse(u)),
                          u)
Example #14
def get_or_post(uri, method, req, content_type=DEFAULT_POST_CONTENT_TYPE,
                accept=None, **kwargs):
    """

    :param uri:
    :param method:
    :param req:
    :param content_type:
    :param accept:
    :param kwargs:
    :return:
    """
    if method in ["GET", "DELETE"]:
        if req.keys():
            _req = req.copy()
            comp = urlsplit(str(uri))
            if comp.query:
                _req.update(parse_qs(comp.query))

            _query = str(_req.to_urlencoded())
            path = urlunsplit((comp.scheme, comp.netloc, comp.path,
                               _query, comp.fragment))
        else:
            path = uri
        body = None
    elif method in ["POST", "PUT"]:
        path = uri
        if content_type == URL_ENCODED:
            body = req.to_urlencoded()
        elif content_type == JSON_ENCODED:
            body = req.to_json()
        else:
            raise UnSupported(
                "Unsupported content type: '%s'" % content_type)

        header_ext = {"Content-Type": content_type}
        if accept:
            header_ext = {"Accept": accept}

        if "headers" in kwargs.keys():
            kwargs["headers"].update(header_ext)
        else:
            kwargs["headers"] = header_ext
    else:
        raise UnSupported("Unsupported HTTP method: '%s'" % method)

    return path, body, kwargs
Example #15
    def download(self, custom_path=None, out=None, timeout=None):
        """
        Download this file. By default, the file is saved in the current working directory with its
        original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
        saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
        that object using the ``out.write`` method.

        Note:
            `custom_path` and `out` are mutually exclusive.

        Args:
            custom_path (:obj:`str`, optional): Custom path.
            out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
                applicable.
            timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
                the read timeout from the server (instead of the one specified during creation of
                the connection pool).

        Raises:
            ValueError: If both ``custom_path`` and ``out`` are passed.

        """
        if custom_path is not None and out is not None:
            raise ValueError('custom_path and out are mutually exclusive')

        # Convert any UTF-8 char into a url encoded ASCII string.
        sres = urllib_parse.urlsplit(self.file_path)
        url = urllib_parse.urlunsplit(
            urllib_parse.SplitResult(sres.scheme, sres.netloc,
                                     urllib_parse.quote(sres.path), sres.query,
                                     sres.fragment))

        if out:
            buf = self.bot.request.retrieve(url)
            out.write(buf)

        else:
            if custom_path:
                filename = custom_path
            else:
                filename = basename(self.file_path)

            self.bot.request.download(url, filename, timeout=timeout)
Example #16
    def download(self, custom_path=None, out=None, timeout=None):
        """
        Download this file. By default, the file is saved in the current working directory with its
        original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
        saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
        that object using the ``out.write`` method.

        Note:
            `custom_path` and `out` are mutually exclusive.

        Args:
            custom_path (:obj:`str`, optional): Custom path.
            out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
                applicable.
            timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
                the read timeout from the server (instead of the one specified during creation of
                the connection pool).

        Raises:
            ValueError: If both ``custom_path`` and ``out`` are passed.
        """

        if custom_path is not None and out is not None:
            raise ValueError('custom_path and out are mutually exclusive')

        # Convert any UTF-8 char into a url encoded ASCII string.
        sres = urllib_parse.urlsplit(self.file_path)
        url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
            sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))

        if out:
            buf = self.bot.request.retrieve(url)
            out.write(buf)

        else:
            if custom_path:
                filename = custom_path
            else:
                filename = basename(self.file_path)

            self.bot.request.download(url, filename, timeout=timeout)
Example #17
def create_return_url(base, query, **kwargs):
    """
    Add a query string plus extra parameters to a base URL which may contain
    a query part already.

    :param base: The base URL (redirect_uri); may contain a query part but
        no fragment
    :param query: Old query part as a string
    :param kwargs: Extra query parameters
    :return: The base URL extended with the merged query parameters
    """
    part = urlsplit(base)
    if part.fragment:
        raise ValueError("Base URL contained parts it shouldn't")

    for key, values in parse_qs(query).items():
        if key in kwargs:
            if isinstance(kwargs[key], six.string_types):
                kwargs[key] = [kwargs[key]]
            kwargs[key].extend(values)
        else:
            kwargs[key] = values

    if part.query:
        for key, values in parse_qs(part.query).items():
            if key in kwargs:
                if isinstance(kwargs[key], six.string_types):
                    kwargs[key] = [kwargs[key]]
                kwargs[key].extend(values)
            else:
                kwargs[key] = values

        _pre = base.split("?")[0]
    else:
        _pre = base

    logger.debug("kwargs: %s" % sanitize(kwargs))
    if kwargs:
        return "%s?%s" % (_pre, url_encode_params(kwargs))
    else:
        return _pre
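A small sketch of how the merging above accumulates repeated parameters into lists. The query string and the state parameter are made up; url_encode_params is an external helper, so urlencode(doseq=True) stands in for it here:

from urllib.parse import parse_qs, urlencode

kwargs = {"state": "xyz"}                       # extra parameters passed by the caller
for key, values in parse_qs("scope=openid&scope=email").items():
    if key in kwargs:
        if isinstance(kwargs[key], str):
            kwargs[key] = [kwargs[key]]
        kwargs[key].extend(values)
    else:
        kwargs[key] = values

print(kwargs)                                   # {'state': 'xyz', 'scope': ['openid', 'email']}
print(urlencode(kwargs, doseq=True))            # state=xyz&scope=openid&scope=email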
Example #18
def create_return_url(base, query, **kwargs):
    """
    Add a query string plus extra parameters to a base URL which may contain
    a query part already.

    :param base: The base URL (redirect_uri); may contain a query part but
        no fragment
    :param query: Old query part as a string
    :param kwargs: Extra query parameters
    :return: The base URL extended with the merged query parameters
    """
    part = urlsplit(base)
    if part.fragment:
        raise ValueError("Base URL contained parts it shouldn't")

    for key, values in parse_qs(query).items():
        if key in kwargs:
            if isinstance(kwargs[key], six.string_types):
                kwargs[key] = [kwargs[key]]
            kwargs[key].extend(values)
        else:
            kwargs[key] = values

    if part.query:
        for key, values in parse_qs(part.query).items():
            if key in kwargs:
                if isinstance(kwargs[key], six.string_types):
                    kwargs[key] = [kwargs[key]]
                kwargs[key].extend(values)
            else:
                kwargs[key] = values

        _pre = base.split("?")[0]
    else:
        _pre = base

    logger.debug("kwargs: %s" % kwargs)
    if kwargs:
        return "%s?%s" % (_pre, url_encode_params(kwargs))
    else:
        return _pre
Example #19
def key_export(baseurl, local_path, vault, keyjar, **kwargs):
    """
    :param baseurl: The base URL to which the key file names are added
    :param local_path: Where on the machine the export files are kept
    :param vault: Where the keys are kept
    :param keyjar: Where to store the exported keys
    :return: The URL at which the exported JWKS file can be fetched
    """
    part = urlsplit(baseurl)

    # deal with the export directory
    if part.path.endswith("/"):
        _path = part.path[:-1]
    else:
        _path = part.path[:]

    local_path = proper_path("%s/%s" % (_path, local_path))

    if not os.path.exists(local_path):
        os.makedirs(local_path)

    kb = key_setup(vault, **kwargs)

    try:
        keyjar[""].append(kb)
    except KeyError:
        keyjar[""] = kb

    # the local filename
    _export_filename = os.path.join(local_path, "jwks")

    with open(_export_filename, "w") as f:
        f.write(str(kb))

    _url = "%s://%s%s" % (part.scheme, part.netloc,
                          _export_filename[1:])

    return _url
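The final URL is assembled from the urlsplit parts plus the local file name; a tiny sketch of that last step, assuming a made-up issuer URL and a relative export path like "./static/jwks":

from urllib.parse import urlsplit

part = urlsplit("https://idp.example.org/op")
_export_filename = "./static/jwks"           # hypothetical local path, relative to the web root

_url = "%s://%s%s" % (part.scheme, part.netloc, _export_filename[1:])
print(_url)                                   # https://idp.example.org/static/jwks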
Example #20
def key_export(baseurl, local_path, vault, keyjar, **kwargs):
    """
    :param baseurl: The base URL to which the key file names are added
    :param local_path: Where on the machine the export files are kept
    :param vault: Where the keys are kept
    :param keyjar: Where to store the exported keys
    :return: The URL at which the exported JWKS file can be fetched
    """
    part = urlsplit(baseurl)

    # deal with the export directory
    if part.path.endswith("/"):
        _path = part.path[:-1]
    else:
        _path = part.path[:]

    local_path = proper_path("%s/%s" % (_path, local_path))

    if not os.path.exists(local_path):
        os.makedirs(local_path)

    kb = key_setup(vault, **kwargs)

    try:
        keyjar[""].append(kb)
    except KeyError:
        keyjar[""] = kb

    # the local filename
    _export_filename = os.path.join(local_path, "jwks")

    with open(_export_filename, "w") as f:
        f.write(str(kb))

    _url = "%s://%s%s" % (part.scheme, part.netloc,
                          _export_filename[1:])

    return _url
Example #21
    def generate_return_url(self, return_to, uid, path=""):
        """
        :param return_to: If it starts with '/' it is an absolute path,
            otherwise a relative path.
        :param uid:
        :param path: The verify path
        """
        if return_to.startswith("http"):
            up = urlsplit(return_to)
            _path = up.path
        else:
            up = None
            _path = return_to

        if not _path.startswith("/"):
            p = path.split("/")
            p[-1] = _path
            _path = "/".join(p)

        if up:
            _path = urlunsplit([up[0], up[1], _path, up[3], up[4]])

        return create_return_url(_path, uid, **{self.query_param: "true"})
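When return_to is an absolute URL, only its path component is swapped before the pieces are reassembled; a short sketch of that step with made-up values:

from urllib.parse import urlsplit, urlunsplit

up = urlsplit("https://sp.example.com/login?next=profile")   # hypothetical return URL
_path = "/verify"                                             # the path computed above

print(urlunsplit([up[0], up[1], _path, up[3], up[4]]))
# https://sp.example.com/verify?next=profile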
Example #22
    def generate_return_url(self, return_to, uid, path=""):
        """
        :param return_to: If it starts with '/' it is an absolute path,
            otherwise a relative path.
        :param uid:
        :param path: The verify path
        """
        if return_to.startswith("http"):
            up = urlsplit(return_to)
            _path = up.path
        else:
            up = None
            _path = return_to

        if not _path.startswith("/"):
            p = path.split("/")
            p[-1] = _path
            _path = "/".join(p)

        if up:
            _path = urlunsplit([up[0], up[1], _path, up[3], up[4]])

        return create_return_url(_path, uid, **{self.query_param: "true"})
Example #23
    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        """Send a request to the server.

        `method' specifies an HTTP request method, e.g. 'GET'.
        `url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not add automatically a 'Host:' header
        `skip_accept_encoding' if True does not add automatically an
           'Accept-Encoding:' header
        """

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None

        # in certain cases, we cannot issue another request on this connection.
        # this occurs when:
        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
        #   2) a response to a previous request has signalled that it is going
        #      to close the connection upon completion.
        #   3) the headers for the previous response have not been read, thus
        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
        #
        # if there is no prior response, then we can request at will.
        #
        # if point (2) is true, then we will have passed the socket to the
        # response (effectively meaning, "there is no prior response"), and
        # will open a new one when a new request is made.
        #
        # Note: if a prior response exists, then we *can* start a new request.
        #       We are not allowed to begin fetching the response to this new
        #       request, however, until that prior response is complete.
        #
        if self.__state == _CS_IDLE:
            self.__state = _CS_REQ_STARTED
        else:
            raise CannotSendRequest(self.__state)

        # Save the method we use, we need it later in the response phase
        self._method = method
        if not url:
            url = '/'
        request = '%s %s %s' % (method, url, self._http_vsn_str)

        # Non-ASCII characters should have been eliminated earlier
        self._output(request.encode('ascii'))

        if self._http_vsn == 11:
            # Issue some standard headers for better HTTP/1.1 compliance

            if not skip_host:
                # this header is issued *only* for HTTP/1.1
                # connections. more specifically, this means it is
                # only issued when the client uses the new
                # HTTPConnection() class. backwards-compat clients
                # will be using HTTP/1.0 and those clients may be
                # issuing this header themselves. we should NOT issue
                # it twice; some web servers (such as Apache) barf
                # when they see two Host: headers

                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy, use
                # the host of the actual URL, not the host of the proxy.

                netloc = ''
                if url.startswith('http'):
                    nil, netloc, nil, nil, nil = urlsplit(url)

                if netloc:
                    try:
                        netloc_enc = netloc.encode("ascii")
                    except UnicodeEncodeError:
                        netloc_enc = netloc.encode("idna")
                    self.putheader('Host', netloc_enc)
                else:
                    try:
                        host_enc = self.host.encode("ascii")
                    except UnicodeEncodeError:
                        host_enc = self.host.encode("idna")

                    # As per RFC 2732, IPv6 address should be wrapped with []
                    # when used as Host header

                    if self.host.find(':') >= 0:
                        host_enc = bytes(b'[' + host_enc + b']')

                    if self.port == self.default_port:
                        self.putheader('Host', host_enc)
                    else:
                        host_enc = host_enc.decode("ascii")
                        self.putheader('Host', "%s:%s" % (host_enc, self.port))

            # note: we are assuming that clients will not attempt to set these
            #       headers since *this* library must deal with the
            #       consequences. this also means that when the supporting
            #       libraries are updated to recognize other forms, then this
            #       code should be changed (removed or updated).

            # we only want a Content-Encoding of "identity" since we don't
            # support encodings such as x-gzip or x-deflate.
            if not skip_accept_encoding:
                self.putheader('Accept-Encoding', 'identity')

            # we can accept "chunked" Transfer-Encodings, but no others
            # NOTE: no TE header implies *only* "chunked"
            #self.putheader('TE', 'chunked')

            # if TE is supplied in the header, then it must appear in a
            # Connection header.
            #self.putheader('Connection', 'TE')

        else:
            # For HTTP/1.0, the server will assume "not chunked"
            pass
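A brief sketch of the Host-header logic above: the netloc of an absolute request URL wins over the connection's host, and a non-ASCII host falls back to IDNA encoding. The URL and host are made up for illustration:

from urllib.parse import urlsplit

url = "http://www.example.com:8080/index.html"
nil, netloc, nil, nil, nil = urlsplit(url)      # same unpacking as in putrequest()
print(netloc)                                   # 'www.example.com:8080' -> Host header value

# Hosts that are not pure ASCII are encoded with the idna codec instead:
host = "b\u00fccher.example"
try:
    host_enc = host.encode("ascii")
except UnicodeEncodeError:
    host_enc = host.encode("idna")
print(host_enc)                                 # b'xn--bcher-kva.example'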
Example #24
 def get_path(self, redirect_uris, issuer):
     for ruri in redirect_uris:
         p = urlsplit(ruri)
         self.path[p.path[1:]] = issuer
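Each redirect URI is reduced to its path minus the leading slash and used as a lookup key mapped to the issuer; for a made-up redirect_uri:

from urllib.parse import urlsplit

p = urlsplit("https://rp.example.com/authz_cb")   # hypothetical redirect_uri
print(p.path[1:])                                  # 'authz_cb' -> key mapped to the issuer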
Example #25
    def test_urlsplit_attributes(self):
        url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, None)
        # geturl() won't return exactly the original URL in this case
        # since the scheme is always case-normalized
        # We handle this by ignoring the first 4 characters of the URL
        self.assertEqual(p.geturl()[4:], url[4:])

        url = "http://*****:*****@www.python.org:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User:[email protected]:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # Addressing issue1698, which suggests Username can contain
        # "@" characters.  Though not RFC compliant, many ftp sites allow
        # and request email addresses as usernames.

        url = "http://[email protected]:[email protected]:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "[email protected]:[email protected]:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "*****@*****.**")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # And check them all again, only with bytes this time
        url = b"HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"WWW.PYTHON.ORG")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl()[4:], url[4:])

        url = b"http://*****:*****@www.python.org:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"User:[email protected]:080")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"query=yes")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, b"User")
        self.assertEqual(p.password, b"Pass")
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        url = b"http://[email protected]:[email protected]:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"[email protected]:[email protected]:080")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"query=yes")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, b"*****@*****.**")
        self.assertEqual(p.password, b"Pass")
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # Verify an illegal port is returned as None
        url = b"HTTP://WWW.PYTHON.ORG:65536/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.port, None)
Example #26
 def filename(self):
     return os.path.basename(
         self.file.name if self.file else urlsplit(self.hyperlink).path)
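A quick sketch of the urlsplit-based fallback used when no local file object is set: only the path part of the hyperlink feeds basename, so query strings do not leak into the file name (URL made up for illustration):

import os
from urllib.parse import urlsplit

hyperlink = "https://cdn.example.com/media/report.pdf?signature=abc123"
print(os.path.basename(urlsplit(hyperlink).path))   # 'report.pdf'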
Example #27
    def test_urlsplit_attributes(self):
        url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, None)
        # geturl() won't return exactly the original URL in this case
        # since the scheme is always case-normalized
        # We handle this by ignoring the first 4 characters of the URL
        self.assertEqual(p.geturl()[4:], url[4:])

        url = "http://*****:*****@www.python.org:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User:[email protected]:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # Addressing issue1698, which suggests Username can contain
        # "@" characters.  Though not RFC compliant, many ftp sites allow
        # and request email addresses as usernames.

        url = "http://[email protected]:[email protected]:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "[email protected]:[email protected]:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "*****@*****.**")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # And check them all again, only with bytes this time
        url = b"HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"WWW.PYTHON.ORG")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl()[4:], url[4:])

        url = b"http://*****:*****@www.python.org:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"User:[email protected]:080")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"query=yes")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, b"User")
        self.assertEqual(p.password, b"Pass")
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        url = b"http://[email protected]:[email protected]:080/doc/?query=yes#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.scheme, b"http")
        self.assertEqual(p.netloc, b"[email protected]:[email protected]:080")
        self.assertEqual(p.path, b"/doc/")
        self.assertEqual(p.query, b"query=yes")
        self.assertEqual(p.fragment, b"frag")
        self.assertEqual(p.username, b"*****@*****.**")
        self.assertEqual(p.password, b"Pass")
        self.assertEqual(p.hostname, b"www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

        # Verify an illegal port is returned as None
        url = b"HTTP://WWW.PYTHON.ORG:65536/doc/#frag"
        p = urllib_parse.urlsplit(url)
        self.assertEqual(p.port, None)
Example #28
 def test_unparse_parse(self):
     str_cases = ['Python', './Python', 'x-newscheme://foo.com/stuff',
                  'x://y', 'x:/y', 'x:/', '/']
     bytes_cases = [x.encode('ascii') for x in str_cases]
     for u in str_cases + bytes_cases:
         self.assertEqual(urllib_parse.urlunsplit(urllib_parse.urlsplit(u)), u)
         self.assertEqual(urllib_parse.urlunparse(urllib_parse.urlparse(u)), u)
Example #29
 def _get_encoded_url(self):
     """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
     sres = urllib_parse.urlsplit(self.file_path)
     return urllib_parse.urlunsplit(urllib_parse.SplitResult(
         sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
Example #30
 def get_path(self, redirect_uris, issuer):
     for ruri in redirect_uris:
         p = urlsplit(ruri)
         self.path[p.path[1:]] = issuer