Example #1
def get_download_data(client, package, version, is_url, all_urls, noprompt, manual_url):
    data = client.release_data(package, version) if not is_url else None
    urls = client.release_urls(package, version) if not is_url else [package]
    if not is_url and not all_urls:
        # Try to find source urls
        urls = [url for url in urls if url['python_version'] == 'source']
    if not urls:
        if 'download_url' in data:
            urls = [defaultdict(str, {'url': data['download_url']})]
            if not urls[0]['url']:
                # The package doesn't have a url, or maybe it only has a wheel.
                sys.exit("Error: Could not build recipe for %s. "
                    "Could not find any valid urls." % package)
            U = parse_url(urls[0]['url'])
            if not U.path:
                sys.exit("Error: Could not parse url for %s: %s" %
                    (package, U))
            urls[0]['filename'] = U.path.rsplit('/')[-1]
            fragment = U.fragment or ''
            if fragment.startswith('md5='):
                md5 = fragment[len('md5='):]
            else:
                md5 = ''
        else:
            sys.exit("Error: No source urls found for %s" % package)
    if len(urls) > 1 and not noprompt:
        print("More than one source version is available for %s:" %
                package)
        if manual_url:
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" % (i, url['url'],
                        human_bytes(url['size']), url['comment_text']))
            n = int(input("Which version should I use? "))
        else:
            print("Using the one with the least source size")
            print("use --manual-url to override this behavior")
            _, n = min([(url['size'], i)
                                for (i, url) in enumerate(urls)])
    else:
        n = 0

    if not is_url:
        print("Using url %s (%s) for %s." % (urls[n]['url'],
            human_bytes(urls[n]['size'] or 0), package))
        pypiurl = urls[n]['url']
        md5 = urls[n]['md5_digest']
        filename = urls[n]['filename'] or 'package'
    else:
        print("Using url %s" % package)
        pypiurl = package
        U = parse_url(package)
        if U.fragment and U.fragment.startswith('md5='):
            md5 = U.fragment[len('md5='):]
        else:
            md5 = ''
        # TODO: 'package' won't work with unpack()
        filename = U.path.rsplit('/', 1)[-1] or 'package'

    return (data, pypiurl, filename, md5)
Example #2
def get_download_data(args, client, package, version, is_url):
    data = client.release_data(package, version) if not is_url else None
    urls = client.release_urls(package, version) if not is_url else [package]
    if not is_url and not args.all_urls:
        # Try to find source urls
        urls = [url for url in urls if url["python_version"] == "source"]
    if not urls:
        if "download_url" in data:
            urls = [defaultdict(str, {"url": data["download_url"]})]
            if not urls[0]["url"]:
                # The package doesn't have a url, or maybe it only has a wheel.
                sys.exit("Error: Could not build recipe for %s. " "Could not find any valid urls." % package)
            U = parse_url(urls[0]["url"])
            if not U.path:
                sys.exit("Error: Could not parse url for %s: %s" % (package, U))
            urls[0]["filename"] = U.path.rsplit("/")[-1]
            fragment = U.fragment or ""
            if fragment.startswith("md5="):
                md5 = fragment[len("md5=") :]
            else:
                md5 = ""
        else:
            sys.exit("Error: No source urls found for %s" % package)
    if len(urls) > 1 and not args.noprompt:
        print("More than one source version is available for %s:" % package)
        if args.manual_url:
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" % (i, url["url"], human_bytes(url["size"]), url["comment_text"]))
            n = int(input("Which version should I use? "))
        else:
            print("Using the one with the least source size")
            print("use --manual-url to override this behavior")
            min_siz, n = min([(url["size"], i) for (i, url) in enumerate(urls)])
    else:
        n = 0

    if not is_url:
        print("Using url %s (%s) for %s." % (urls[n]["url"], human_bytes(urls[n]["size"] or 0), package))
        pypiurl = urls[n]["url"]
        md5 = urls[n]["md5_digest"]
        filename = urls[n]["filename"]
    else:
        print("Using url %s" % package)
        pypiurl = package
        U = parse_url(package)
        if U.fragment and U.fragment.startswith("md5="):
            md5 = U.fragment[len("md5=") :]
        else:
            md5 = ""
        # TODO: 'package' won't work with unpack()
        filename = U.path.rsplit("/", 1)[-1] or "package"

    return (data, pypiurl, filename, md5)
Example #3
    def to_safeurl(self, url):
        # type: (str) -> Tuple[str, str]
        # Try and see if there is any sort of redirection in the recipient URL
        # if yes, get the final URL to be passed into the validator
        if self.allow_redirects:
            try:
                _dummy_resp = requests.head(url, allow_redirects=True)
                url = _dummy_resp.url
            except ConnectionError:
                logger.warning("Recipient URL not reachable")

        parts = parse_url(url)
        host = parts.host
        addr = socket.gethostbyname(host)
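        # Rebuild the URL with the resolved IP in place of the hostname so a
        # second DNS lookup cannot re-point the request; the original host is
        # returned separately (e.g. for use in a Host header).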
        safeurl = Url(
            scheme=parts.scheme,
            auth=parts.auth,
            host=addr,
            port=parts.port,
            path=parts.path,
            query=parts.query,
            fragment=parts.fragment,
        )
        block_internal_ips()(addr)
        return host, safeurl.url
Example #4
    def __http_login_sync(self):
        self.http_session = requests.Session()
        self.http_session.cookies.set('legal-acknowledge',
                                      'yes',
                                      domain=parse_url(self.LOGIN_URI).host,
                                      path='/')

        try:
            login_response = self.http_session.post(
                self.LOGIN_URI,
                allow_redirects=False,  # Just the first response
                data={
                    'op': 'login',
                    'redirect': '/',
                    'emailaddress': self.username,
                    'password': self.password,
                })
        except requests.RequestException:
            raise AWLConnectionError(f"Could not connect to {self.LOGIN_URI}")

        try:
            login_response.raise_for_status()
        except requests.HTTPError:
            raise AWLLoginError(f"Login failed: {login_response.reason}")

        if self.session_id is None:
            raise AWLLoginError("Login failed; could not establish session. "
                                "Check credentials.")
Example #5
def _get_url_and_path(url: str) -> str:
    """Construct URL only containing scheme, netloc and path by truncating other parts.

    This method complies with RFC 3986.

    Examples
    --------
    >>> _get_url_and_path("http://example.com/path;segment?ab=xy&zed=qwe#test=1&foo=bar")
    "http://example.com/path;segment"


    Parameters
    ----------
    url : str
        URL to parse.

    Returns
    -------
    url : str
        URL with scheme, netloc and path

    """
    url_parsed = urlsplit(url)
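    # urlsplit keeps path parameters (';segment') inside .path, so only the
    # query and fragment are dropped below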
    url_and_path = urlunparse(
        [url_parsed.scheme, url_parsed.netloc, url_parsed.path, None, None, None]
    )
    return parse_url(url_and_path).url
Example #6
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(':')
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            #self.websocket = create_connection(ws_url,
            #                                   http_proxy_host=proxy_host,
            #                                   http_proxy_port=proxy_port,
            #                                   http_proxy_auth=proxy_auth)
            self.websocket = WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})
            self.websocket.connect(ws_url,
                                   http_proxy_host=proxy_host,
                                   http_proxy_port=proxy_port,
                                   http_proxy_auth=proxy_auth)
            self.connected = True
            self.last_connected_at = time.time()
            logging.debug("RTM connected")
            self.websocket.sock.setblocking(0)
        except Exception as e:
            self.connected = False
            raise SlackConnectionError(message=str(e))
Example #7
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and "http" in self.proxies:
            parts = parse_url(self.proxies["http"])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(":")
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            self.websocket = create_connection(
                ws_url,
                http_proxy_host=proxy_host,
                http_proxy_port=proxy_port,
                http_proxy_auth=proxy_auth,
                #sslopt={"protocol": 2, "cert_reqs": 0}
            )
            self.connected = True
            self.last_connected_at = time.time()
            logging.debug("RTM connected")
            self.websocket.sock.setblocking(0)
        except Exception as e:
            self.connected = False
            raise SlackConnectionError(message=str(e))
Example #8
def url_to_s3_info(url):
    """
    Convert a S3 url to a tuple of bucket and key
    """
    parsed_url = parse_url(url)
    assert parsed_url.scheme == 's3', "You can only use s3: urls (not %r)" % url
    bucket, key = parsed_url.host, parsed_url.path
    return bucket, key
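A quick illustrative call (hypothetical bucket and key). Note that parse_url keeps the leading slash on the path, so it ends up in the returned key:

bucket, key = url_to_s3_info('s3://my-bucket/path/to/pkg.tar.bz2')
# bucket == 'my-bucket'
# key == '/path/to/pkg.tar.bz2'  (leading '/' preserved by parse_url)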
Example #9
    def requests_kwargs_from_nectar_config(config):
        """
        Take a Nectar configuration and map it to a set of requests keyword arguments.

        These keyword arguments can be used with the Python requests ``requests.request``
        API. In the future when Nectar is just a memory, this can be adapted to map a
        Pulp importer configuration to requests kwargs.

        :param config: A nectar configuration instance
        :type  config: nectar.config.DownloaderConfig

        :return: A dictionary of keyword arguments for the requests API.
        :rtype:  dict
        """
        requests_kwargs = {}

        # Configure basic authentication
        if config.basic_auth_username and config.basic_auth_password:
            requests_kwargs['auth'] = (config.basic_auth_username,
                                       config.basic_auth_password)

        # Configure verification of the server's TLS certificates;
        # defaults to the system trust store.
        if config.ssl_validation is not False:
            if config.ssl_ca_cert_path:
                requests_kwargs['verify'] = config.ssl_ca_cert_path
            else:
                requests_kwargs['verify'] = True
        else:
            requests_kwargs['verify'] = False

        # Configure client-side certificate authentication
        if config.ssl_client_cert_path and config.ssl_client_key_path:
            requests_kwargs['cert'] = (config.ssl_client_cert_path,
                                       config.ssl_client_key_path)

        # Configure proxy servers and proxy authentication.
        #
        # Annoyingly, although the config is called 'proxy_url', the port and basic auth
        # credentials are defined separately, so we have to build the url.
        if config.proxy_url and config.proxy_port:
            parsed_url = urllib3_url.parse_url(config.proxy_url)
            proxy_auth = None
            if config.proxy_username and config.proxy_password:
                proxy_auth = '{user}:{password}'.format(
                    user=config.proxy_username, password=config.proxy_password)
            parsed_url = urllib3_url.Url(scheme=parsed_url.scheme,
                                         auth=proxy_auth,
                                         host=parsed_url.host,
                                         port=config.proxy_port)
            requests_kwargs['proxies'] = {
                'http': parsed_url.url,
                'https': parsed_url.url
            }

        requests_kwargs[
            'stream'] = True  # required for reading the download in chunks
        return requests_kwargs
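A hedged usage sketch (the config object, URL, and output file are hypothetical), showing the resulting kwargs handed to requests and the streamed body consumed in chunks:

import requests

kwargs = requests_kwargs_from_nectar_config(config)  # config: a nectar DownloaderConfig
response = requests.request('GET', 'https://example.com/some/file.iso', **kwargs)
with open('file.iso', 'wb') as f:
    for chunk in response.iter_content(chunk_size=8192):  # possible because stream=True
        f.write(chunk)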
Example #10
 def __init__(self, url, timeout=DEFAULT_TIMEOUT):
     from requests.packages.urllib3.util.url import parse_url
     self._parsed_url = parse_url(url)
     self._url = url
     self.host = str(self._parsed_url.host)
     if self._parsed_url.port:
         self.port = int(self._parsed_url.port)
     else:
         self.port = 12202
     if isinstance(timeout, six.string_types):
         timeout = int(timeout)
     self.timeout = timeout
     self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Example #11
    def requests_kwargs_from_nectar_config(config):
        """
        Take a Nectar configuration and map it to a set of requests keyword arguments.

        These keyword arguments can be used with the Python requests ``requests.request``
        API. In the future when Nectar is just a memory, this can be adapted to map a
        Pulp importer configuration to requests kwargs.

        :param config: A nectar configuration instance
        :type  config: nectar.config.DownloaderConfig

        :return: A dictionary of keyword arguments for the requests API.
        :rtype:  dict
        """
        requests_kwargs = {}

        # Configure basic authentication
        if config.basic_auth_username and config.basic_auth_password:
            requests_kwargs['auth'] = (config.basic_auth_username, config.basic_auth_password)

        # Configure verification of the server's TLS certificates;
        # defaults to the system trust store.
        if config.ssl_validation is not False:
            if config.ssl_ca_cert_path:
                requests_kwargs['verify'] = config.ssl_ca_cert_path
            else:
                requests_kwargs['verify'] = True
        else:
            requests_kwargs['verify'] = False

        # Configure client-side certificate authentication
        if config.ssl_client_cert_path and config.ssl_client_key_path:
            requests_kwargs['cert'] = (config.ssl_client_cert_path, config.ssl_client_key_path)

        # Configure proxy servers and proxy authentication.
        #
        # Annoyingly, although the config is called 'proxy_url', the port and basic auth
        # credentials are defined separately, so we have to build the url.
        if config.proxy_url and config.proxy_port:
            parsed_url = urllib3_url.parse_url(config.proxy_url)
            proxy_auth = None
            if config.proxy_username and config.proxy_password:
                proxy_auth = '{user}:{password}'.format(user=config.proxy_username,
                                                        password=config.proxy_password)
            parsed_url = urllib3_url.Url(scheme=parsed_url.scheme, auth=proxy_auth,
                                         host=parsed_url.host, port=config.proxy_port)
            requests_kwargs['proxies'] = {'http': parsed_url.url, 'https': parsed_url.url}

        requests_kwargs['stream'] = True  # required for reading the download in chunks
        return requests_kwargs
Example #12
 def to_safeurl(self, url):
     # type: (str) -> Tuple[str, str]
     parts = parse_url(url)
     host = parts.host
     addr = socket.gethostbyname(host)
     safeurl = Url(
         scheme=parts.scheme,
         auth=parts.auth,
         host=addr,
         port=parts.port,
         path=parts.path,
         query=parts.query,
         fragment=parts.fragment,
     )
     block_internal_ips()(addr)
     return host, safeurl.url
Example #13
    def match(request: PreparedRequest) -> Tuple[bool, str]:
        reason = ""
        data = parse_url(request.url)
        request_query = data.query

        request_qsl = sorted(parse_qsl(request_query)) if request_query else []
        matcher_qsl = sorted(parse_qsl(query)) if query else []

        valid = not query if request_query is None else request_qsl == matcher_qsl

        if not valid:
            reason = "Query string doesn't match. {} doesn't match {}".format(
                _create_key_val_str(dict(request_qsl)),
                _create_key_val_str(dict(matcher_qsl)),
            )

        return valid, reason
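For illustration, a hedged sketch of how the matcher behaves; in the real code `match` closes over the target `query` string, so the setup below is hypothetical:

import requests

req = requests.Request('GET', 'https://example.com/api?b=2&a=1').prepare()
ok, reason = match(req)  # with query == 'a=1&b=2' this matches: parse_qsl plus sorted() makes the comparison order-insensitive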
Example #14
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(':')
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            self.websocket = create_connection(ws_url,
                                               http_proxy_host=proxy_host,
                                               http_proxy_port=proxy_port,
                                               http_proxy_auth=proxy_auth)
            self.websocket.sock.setblocking(0)
        except Exception as e:
            raise SlackConnectionError(message=str(e))
Example #15
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(':')
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            self.websocket = create_connection(ws_url,
                                               http_proxy_host=proxy_host,
                                               http_proxy_port=proxy_port,
                                               http_proxy_auth=proxy_auth)
            self.websocket.sock.setblocking(0)
        except Exception as e:
            raise SlackConnectionError(str(e))
Example #16
 def __init__(self, url, timeout=DEFAULT_TIMEOUT, cached=True):
     from requests.packages.urllib3.util.url import parse_url
     self._parsed_url = parse_url(url)
     self._url = url
     self._cache = self.queue_class() if cached else None
     self.host = str(self._parsed_url.host)
     if self._parsed_url.port:
         self.port = int(self._parsed_url.port)
     else:
         self.port = 12201
     if isinstance(timeout, six.string_types):
         timeout = int(timeout)
     self.timeout = timeout
     self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     try:
         self.sock.connect((self.host, int(self.port)))
     except IOError as e:
         raise RuntimeError('Could not connect via TCP: %s' % e)
Example #17
    def prepare_request(self, request):
        """
        Override request preparer so we can get the prepared content length, for tracking
        transfer sizes

        :param request: requests.Request
        :rtype: requests.PreparedRequest
        """
        prepped = super(SessionWrapper, self).prepare_request(request)
        parsed_url = parse_url(request.url)

        # we don't bother checking if the content length header exists here because we've probably
        # been given the request body as Morango sends bodies that aren't streamed, so the
        # underlying requests code will set it appropriately
        self.bytes_sent += len("{} {} HTTP/1.1".format(request.method,
                                                       parsed_url.path))
        self.bytes_sent += _length_of_headers(prepped.headers)
        self.bytes_sent += _headers_content_length(prepped.headers)

        return prepped
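The two header-size helpers are referenced but not shown; a plausible sketch under assumptions (not the project's actual code):

def _length_of_headers(headers):
    # approximate bytes of the serialized 'Name: value\r\n' lines plus the blank line
    return sum(len('{}: {}\r\n'.format(k, v)) for k, v in headers.items()) + len('\r\n')

def _headers_content_length(headers):
    # body size as declared by the Content-Length header, if any
    return int(headers.get('Content-Length', 0))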
Example #18
    async def _connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(':')
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            self.websocket = await wsclient.connect(ws_url,
                                                    extra_headers={
                                                        "http_proxy_host":
                                                        proxy_host,
                                                        "http_proxy_port":
                                                        proxy_port,
                                                        "http_proxy_auth":
                                                        proxy_auth
                                                    })
        except Exception as e:
            raise SlackConnectionError(str(e))
Example #19
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_host, proxy_port = parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(':')
        else:
            proxy_auth, proxy_port, proxy_host = None, None, None

        try:
            self.websocket = create_connection(ws_url,
                                               http_proxy_host=proxy_host,
                                               http_proxy_port=proxy_port,
                                               http_proxy_auth=proxy_auth)
            self.connected = True
            self.last_connected_at = time.time()
            logging.debug("RTM connected")
            self.websocket.sock.setblocking(0)
        except Exception as e:
            self.connected = False
            raise SlackConnectionError(message=str(e))
Example #20
    def connect_slack_websocket(self, ws_url):
        """Uses http proxy if available"""
        if self.proxies and 'http' in self.proxies:
            parts = parse_url(self.proxies['http'])
            proxy_type, proxy_host, proxy_port = parts.scheme, parts.host, parts.port
            auth = parts.auth
            proxy_auth = auth and auth.split(":")
        else:
            proxy_type, proxy_auth, proxy_port, proxy_host = 'http', None, None, None

        try:
            self.websocket = create_connection(ws_url,
                                               proxy_type=proxy_type,
                                               http_proxy_host=proxy_host,
                                               http_proxy_port=proxy_port,
                                               http_proxy_auth=proxy_auth)
            self.connected = True
            self.last_connected_at = time.time()
            logging.debug("RTM connected")
            self.websocket.sock.setblocking(0)
        except Exception as e:
            self.connected = False
            raise SlackConnectionError(message=str(e))
Example #21
File: url.py Project: Korijn/conda
def add_username_and_password(url, username, password):
    url_parts = parse_url(url)._asdict()
    url_parts['auth'] = username + ':' + quote(password, '')
    return Url(**url_parts).url
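An illustrative round trip (hypothetical credentials). quote(password, '') percent-encodes every reserved character, so an '@' in the password cannot be confused with the auth separator:

>>> add_username_and_password('https://example.com/pkgs', 'user', 'p@ss')
'https://user:p%40ss@example.com/pkgs'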
Example #22
def urlparse(url):
    if on_win and url.startswith('file:'):
        url = url.replace('\\', '/')  # keep the result; str.replace does not mutate in place
    return parse_url(url)
Example #23
def get_download_data(pypi_data, package, version, is_url, all_urls, noprompt, manual_url):
    """
    Get at least one valid *source* download URL or fail.

    Returns
    -------

    data : dict
        Summary of package information
    pypiurl : str
        Download URL of package, which may or may not actually be from PyPI.
    filename : str
        Name of file; used to check cache
    digest : dict
        Key is type of checksum, value is the checksum.
    """
    data = pypi_data['info'] if not is_url else {}

    # PyPI will typically have several downloads (source, wheels) for one
    # package/version.
    urls = [url for url in pypi_data['releases'][version]] if not is_url else [package]

    if not is_url and not all_urls:
        # Try to find source urls
        urls = [url for url in urls if url['packagetype'] == 'sdist']

    if not urls:
        # Try harder for a download location
        if data.get('download_url'):
            urls = [defaultdict(str, {'url': data['download_url']})]
            if not urls[0]['url']:
                # The package doesn't have a url, or maybe it only has a wheel.
                sys.exit("Error: Could not build recipe for %s. "
                    "Could not find any valid urls." % package)
            U = parse_url(urls[0]['url'])
            if not U.path:
                sys.exit("Error: Could not parse url for %s: %s" %
                    (package, U))
            urls[0]['filename'] = U.path.rsplit('/')[-1]
            fragment = U.fragment or ''
            digest = digest_from_fragment(fragment)
        else:
            sys.exit("Error: No source urls found for %s" % package)
    if len(urls) > 1 and not noprompt:
        print("More than one source version is available for %s:" %
                package)
        if manual_url:
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" % (i, url['url'],
                        human_bytes(url['size']), url['comment_text']))
            n = int(input("Which version should I use? "))
        else:
            print("Using the one with the least source size")
            print("use --manual-url to override this behavior")
            _, n = min([(url['size'], i)
                                for (i, url) in enumerate(urls)])
    else:
        n = 0

    if not is_url:
        # Found a location from PyPI.
        url = urls[n]
        pypiurl = url['url']
        print("Using url %s (%s) for %s." % (pypiurl,
            human_bytes(url['size'] or 0), package))
        # List of digests we might get in order of preference
        for p in POSSIBLE_DIGESTS:
            try:
                if url['digests'][p]:
                    digest = (p, url['digests'][p])
                    break
            except KeyError:
                continue
        else:
            # That didn't work, even though as of 7/17/2017 some packages
            # have a 'digests' entry.
            # As a last-ditch effort, try for the md5_digest entry.
            try:
                digest = ('md5', url['md5_digest'])
            except KeyError:
                # Give up
                digest = ()
        filename = url['filename'] or 'package'
    else:
        # User provided a URL, try to use it.
        print("Using url %s" % package)
        pypiurl = package
        U = parse_url(package)
        digest = digest_from_fragment(U.fragment)
        # TODO: 'package' won't work with unpack()
        filename = U.path.rsplit('/', 1)[-1] or 'package'

    return (data, pypiurl, filename, digest)
Example #24
 def urlident(self):
     """Used to order HTTP requests by URL."""
     url = parse_url(self.subscriber.url)
     return url.host, url.port or 80, url.scheme or 'http'
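Presumably this tuple serves as a sort key so that requests to the same endpoint are grouped together; a minimal hedged sketch (the subscriptions list is hypothetical):

subscriptions.sort(key=lambda s: s.urlident())  # orders by host, then port, then scheme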
Example #25
 def urlident(self):
     # type: () -> Tuple[str, int, str]
     """Used to order HTTP requests by URL."""
     url = parse_url(self.subscriber.url)
     return url.scheme or 'http', url.port or 80, url.host
Example #26
def get_download_data(pypi_data, package, version, is_url, all_urls, noprompt,
                      manual_url):
    """
    Get at least one valid *source* download URL or fail.

    Returns
    -------

    data : dict
        Summary of package information
    pypiurl : str
        Download URL of package, which may or may not actually be from PyPI.
    filename : str
        Name of file; used to check cache
    digest : dict
        Key is type of checksum, value is the checksum.
    """
    data = pypi_data['info'] if not is_url else {}

    # PyPI will typically have several downloads (source, wheels) for one
    # package/version.
    urls = [url for url in pypi_data['releases'][version]
            ] if not is_url else [package]

    if not is_url and not all_urls:
        # Try to find source urls
        urls = [url for url in urls if url['packagetype'] == 'sdist']

    if not urls:
        # Try harder for a download location
        if data.get('download_url'):
            urls = [defaultdict(str, {'url': data['download_url']})]
            if not urls[0]['url']:
                # The package doesn't have a url, or maybe it only has a wheel.
                sys.exit("Error: Could not build recipe for %s. "
                         "Could not find any valid urls." % package)
            U = parse_url(urls[0]['url'])
            if not U.path:
                sys.exit("Error: Could not parse url for %s: %s" %
                         (package, U))
            urls[0]['filename'] = U.path.rsplit('/')[-1]
            fragment = U.fragment or ''
            digest = fragment.split("=")
        else:
            sys.exit("Error: No source urls found for %s" % package)
    if len(urls) > 1 and not noprompt:
        print("More than one source version is available for %s:" % package)
        if manual_url:
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" %
                      (i, url['url'], human_bytes(
                          url['size']), url['comment_text']))
            n = int(input("Which version should I use? "))
        else:
            print("Using the one with the least source size")
            print("use --manual-url to override this behavior")
            _, n = min([(url['size'], i) for (i, url) in enumerate(urls)])
    else:
        n = 0

    if not is_url:
        # Found a location from PyPI.
        url = urls[n]
        pypiurl = url['url']
        print("Using url %s (%s) for %s." %
              (pypiurl, human_bytes(url['size'] or 0), package))

        if url['digests']['sha256']:
            digest = ('sha256', url['digests']['sha256'])
        else:
            # That didn't work, even though as of 7/17/2017 some packages
            # have a 'digests' entry.
            # As a last-ditch effort, try for the md5_digest entry.
            digest = ()
        filename = url['filename'] or 'package'
    else:
        # User provided a URL, try to use it.
        print("Using url %s" % package)
        pypiurl = package
        U = parse_url(package)
        digest = U.fragment.split("=")
        # TODO: 'package' won't work with unpack()
        filename = U.path.rsplit('/', 1)[-1] or 'package'

    return (data, pypiurl, filename, digest)
Example #27
def add_username_and_pass_to_url(url, username, passwd):
    url_obj = parse_url(url)
    # Url is an immutable namedtuple, so attribute assignment would raise;
    # build a modified copy with _replace instead
    url_obj = url_obj._replace(auth=username + ':' + quote(passwd, ''))
    return url_obj.url
Example #28
def urlparse(url):
    return parse_url(url)
Example #29
def main(args, parser):
    proxies = get_proxy_servers()

    if proxies:
        transport = RequestsTransport()
    else:
        transport = None
    client = ServerProxy(args.pypi_url, transport=transport)
    package_dicts = {}
    [output_dir] = args.output_dir

    all_packages = client.list_packages()
    all_packages_lower = [i.lower() for i in all_packages]

    while args.packages:
        [output_dir] = args.output_dir

        package = args.packages.pop()

        is_url = ':' in package

        if not is_url:
            dir_path = join(output_dir, package.lower())
            if exists(dir_path):
                raise RuntimeError("directory already exists: %s" % dir_path)
        d = package_dicts.setdefault(package,
            {
                'packagename': package.lower(),
                'run_depends': '',
                'build_depends': '',
                'entry_points': '',
                'build_comment': '# ',
                'test_commands': '',
                'usemd5': '',
                'test_comment': '',
                'entry_comment': '# ',
                'egg_comment': '# ',
                'summary_comment': '',
                'home_comment': '',
            })
        if is_url:
            del d['packagename']

        if is_url:
            d['version'] = 'UNKNOWN'
        else:
            if args.version:
                [version] = args.version
                versions = client.package_releases(package, True)
                if version not in versions:
                    sys.exit("Error: Version %s of %s is not available on PyPI."
                             % (version, package))
                d['version'] = version
            else:
                versions = client.package_releases(package)
                if not versions:
                    # The xmlrpc interface is case sensitive, but the index itself
                    # is apparently not (the last time I checked,
                    # len(set(all_packages_lower)) == len(set(all_packages)))
                    if package.lower() in all_packages_lower:
                        print("%s not found, trying %s" % (package, package.capitalize()))
                        args.packages.append(all_packages[all_packages_lower.index(package.lower())])
                        del package_dicts[package]
                        continue
                    sys.exit("Error: Could not find any versions of package %s" %
                             package)
                if len(versions) > 1:
                    print("Warning, the following versions were found for %s" %
                          package)
                    for ver in versions:
                        print(ver)
                    print("Using %s" % versions[0])
                    print("Use --version to specify a different version.")
                d['version'] = versions[0]

        data = client.release_data(package, d['version']) if not is_url else None
        urls = client.release_urls(package, d['version']) if not is_url else [package]
        if not is_url and not args.all_urls:
            # Try to find source urls
            urls = [url for url in urls if url['python_version'] == 'source']
        if not urls:
            if 'download_url' in data:
                urls = [defaultdict(str, {'url': data['download_url']})]
                U = parse_url(urls[0]['url'])
                urls[0]['filename'] = U.path.rsplit('/')[-1]
                fragment = U.fragment or ''
                if fragment.startswith('md5='):
                    d['usemd5'] = ''
                    d['md5'] = fragment[len('md5='):]
                else:
                    d['usemd5'] = '#'
            else:
                sys.exit("Error: No source urls found for %s" % package)
        if len(urls) > 1 and not args.noprompt:
            print("More than one source version is available for %s:" %
                  package)
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" % (i, url['url'],
                                          human_bytes(url['size']),
                                          url['comment_text']))
            n = int(input("Which version should I use? "))
        else:
            n = 0

        if not is_url:
            print("Using url %s (%s) for %s." % (urls[n]['url'],
                human_bytes(urls[n]['size'] or 0), package))
            d['pypiurl'] = urls[n]['url']
            d['md5'] = urls[n]['md5_digest']
            d['filename'] = urls[n]['filename']
        else:
            print("Using url %s" % package)
            d['pypiurl'] = package
            U = parse_url(package)
            if U.fragment and U.fragment.startswith('md5='):
                d['usemd5'] = ''
                d['md5'] = U.fragment[len('md5='):]
            else:
                d['usemd5'] = '#'
                d['md5'] = ''
            # TODO: 'package' won't work with unpack()
            d['filename'] = U.path.rsplit('/', 1)[-1] or 'package'

        if is_url:
            d['import_tests'] = 'PLACEHOLDER'
        else:
            d['import_tests'] = valid(package).lower()

        get_package_metadata(args, package, d, data)

        if d['import_tests'] == '':
            d['import_comment'] = '# '
        else:
            d['import_comment'] = ''
            d['import_tests'] = INDENT + d['import_tests']

        if d['entry_comment'] == d['import_comment'] == '# ':
            d['test_comment'] = '# '

    for package in package_dicts:
        d = package_dicts[package]
        name = d['packagename']
        makedirs(join(output_dir, name))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, name, 'meta.yaml'), 'w') as f:
            f.write(PYPI_META.format(**d))
        with open(join(output_dir, name, 'build.sh'), 'w') as f:
            f.write(PYPI_BUILD_SH.format(**d))
        with open(join(output_dir, name, 'bld.bat'), 'w') as f:
            f.write(PYPI_BLD_BAT.format(**d))

    print("Done")
Example #30
    def __init__(self, method, url, params=None, data=None, headers=None, cookies=None,
                 files=None, timeout=None, connection_timeout=None, allow_redirects=True,
                 max_redirects=5, proxies=None, auth=None, network_interface=None, use_gzip=True,
                 validate_cert=False, ca_certs=None, cert=None, debug=False, user_agent=None,
                 ip_v6=False, options=None, netrc=False, netrc_file=None, encode_query=None, opener=None, **kwargs):
        """A single HTTP / HTTPS request

        Arguments:
        - `url`: (string) resource url
        - `method`: (string) one of `self.SUPPORTED_METHODS`
        - `data`: (dict, tuple, string) data to send as Content-Disposition form-data
        - `params`: (dict, tuple) of GET params (?param1=value1&param2=value2)
        - `headers`: (dict, tuple) of request headers
        - `cookies`: (dict, tuple or CookieJar) of cookies
        - `files`: (dict, tuple or list) of files
           Example:
               (('field_file_name', '/path/to/file.txt'),
               ('field_file_name', io.open('/path/to/file.txt')),
               ('multiple_files_field', (io.open("/path/to/file.1.txt"), io.open("/path/to/file.1.txt"))),
               ('multiple_files_field', ("/path/to/file.1.txt", "/path/to/file.1.txt")))
        - `timeout`: (float) connection time out
        - `connection_timeout`: (float)
        - `allow_redirects`: (bool) follow redirects parameter
        - `proxies`: (dict, tuple or list) of proxies
           Examples:
               ('http', ('127.0.0.1', 9050))
               ('http', ('127.0.0.1', 9050, ('username', 'password')))
        - `auth`: (dict, tuple or list) for resource base auth
        - `network_interface`: (str) Perform an operation using a specified interface.
           You can enter interface name, IP address or host name.
        - `use_gzip`: (bool) accept gzipped data
        - `validate_cert`: (bool) validate server certificate
        - `ca_certs`: tells curl to use the specified certificate file to verify the peer.
        - `cert`: (string) tells curl to use the specified certificate file
           when getting a file with HTTPS.
        - `debug`: (bool) use for `pycurl.DEBUGFUNCTION`
        - `user_agent`: (string) user agent
        - `ip_v6`: (bool) use ipv6 protocol
        - `options`: (tuple, list) low level pycurl options using
        """
        self._url = url
        if not method or not isinstance(method, str):
            raise InterfaceError("method argument must be string")

        if method.upper() not in self.SUPPORTED_METHODS:
            raise InvalidMethod("cURL do not support %s method" % method.upper())

        self._method = method.upper()

        self._user_agent = user_agent

        self._headers = data_wrapper(headers)

        if files is not None:
            self._files = make_curl_post_files(files)
        else:
            self._files = None

        self._params = data_wrapper(params)

        # String, dict, tuple, list
        if isinstance(data, (str, bytes, type(None))):
            self._data = data
        else:
            self._data = data_wrapper(data)

        if isinstance(cookies, CookieJar):
            self._cookies = cookies
        elif isinstance(cookies, (tuple, dict)):
            self._cookies = to_cookiejar(cookies)
        else:
            self._cookies = None
        if self._cookies is not None:
            self.cookie_file = tempfile.NamedTemporaryFile()

        if isinstance(proxies, type(None)):
            self._proxy = None
        elif isinstance(proxies, dict):
            if proxies.get('https'):

                proxy = parse_url(proxies.get('https'))

                if not proxy.port:
                    port = port_by_scheme.get(proxy.scheme, 80)
                    proxy = proxy._replace(port=port)
                assert proxy.scheme in ("http", "https"), \
                    'Not supported proxy scheme %s' % proxy.scheme
                self._proxy = ('https', (proxy.host, proxy.port), proxy.auth)
                # print(self._proxy)

            elif proxies.get('socks5'):

                proxy = parse_url(proxies.get('socks5'))

                if not proxy.port:
                    port = port_by_scheme.get(proxy.scheme, 1080)
                    proxy = proxy._replace(port=port)
                assert proxy.scheme in ("socks5",), \
                    'Not supported proxy scheme %s' % proxy.scheme
                self._proxy = ('socks5', (proxy.host, proxy.port), proxy.auth)

        if not isinstance(network_interface, (str, type(None))):
            raise InterfaceError("Network interface argument must be string or None")

        self._network_interface = network_interface

        if isinstance(auth, AuthManager):
            self._auth = auth
        elif isinstance(auth, tuple):
            self._auth = BasicAuth(*auth)
        elif auth is None:
            self._auth = None
        else:
            raise ValueError("auth must be list, tuple or dict, not %s" % type(auth))

        # follow by location header field
        self._allow_redirects = allow_redirects
        self._max_redirects = max_redirects

        self._timeout = int(timeout or DEFAULT_TIME_OUT)
        self._connection_timeout = connection_timeout

        self._use_gzip = use_gzip

        # Certificates
        self._validate_cert = validate_cert
        self._ca_certs = ca_certs
        self._cert = cert
        self._start_time = time.time()
        self._debug_curl = debug
        self._ip_v6 = ip_v6

        self.response = None

        if options is None:
            self._options = None
        elif isinstance(options, (list, tuple)):
            self._options = data_wrapper(options)
        else:
            raise InterfaceError("options must be None, list or tuple")

        self._curl = None

        self.body_output = io.StringIO()
        self.headers_output = io.StringIO()

        self._netrc = netrc
        self._netrc_file = None

        self._encode_query = encode_query

        if opener:
            self._opener = opener
        else:
            self._opener = None