def __call__(self, req):
    parsed = urllib_parse.urlparse(req.url)

    # Get the netloc without any embedded credentials
    netloc = parsed.netloc.rsplit("@", 1)[-1]

    # Set the url of the request to the url without any credentials
    req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

    # Use any stored credentials that we have for this netloc
    username, password = self.passwords.get(netloc, (None, None))

    # Extract credentials embedded in the url if we have none stored
    if username is None:
        username, password = self.parse_credentials(parsed.netloc)

    # Get creds from netrc if we still don't have them
    if username is None and password is None:
        netrc_auth = get_netrc_auth(req.url)
        username, password = netrc_auth if netrc_auth else (None, None)

    if username or password:
        # Store the username and password
        self.passwords[netloc] = (username, password)

        # Send the basic auth with this request
        req = HTTPBasicAuth(username or "", password or "")(req)

    # Attach a hook to handle 401 responses
    req.register_hook("response", self.handle_401)

    return req
def __init__(self, requirement_string):
    try:
        req = REQUIREMENT.parseString(requirement_string)
    except ParseException as e:
        raise InvalidRequirement(
            'Parse error at "{0!r}": {1}'.format(
                requirement_string[e.loc:e.loc + 8], e.msg
            )
        )

    self.name = req.name
    if req.url:
        parsed_url = urlparse.urlparse(req.url)
        if parsed_url.scheme == "file":
            if urlparse.urlunparse(parsed_url) != req.url:
                raise InvalidRequirement("Invalid URL given")
        elif not (parsed_url.scheme and parsed_url.netloc) or (
                not parsed_url.scheme and not parsed_url.netloc):
            raise InvalidRequirement("Invalid URL: {0}".format(req.url))
        self.url = req.url
    else:
        self.url = None
    self.extras = set(req.extras.asList() if req.extras else [])
    self.specifier = SpecifierSet(req.specifier)
    self.marker = req.marker if req.marker else None
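# Usage sketch (not part of the code above): this constructor mirrors the
# standalone `packaging` library's Requirement class, of which the vendored
# code here is a copy, so the behaviour can be exercised with that
# distribution installed. Attribute names match the assignments above;
# the requirement string is illustrative only.
from packaging.requirements import Requirement

req = Requirement('requests[security]>=2.8.1; python_version < "3"')
print(req.name)            # requests
print(sorted(req.extras))  # ['security']
print(str(req.specifier))  # >=2.8.1
print(str(req.marker))     # python_version < "3"
print(req.url)             # None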
def handle_401(self, resp, **kwargs):
    # We only care about 401 responses, anything else we want to just
    # pass through the actual response
    if resp.status_code != 401:
        return resp

    # We are not able to prompt the user so simply return the response
    if not self.prompting:
        return resp

    parsed = urllib_parse.urlparse(resp.url)

    # Prompt the user for a new username and password
    username = six.moves.input("User for %s: " % parsed.netloc)
    password = getpass.getpass("Password: ")

    # Store the new username and password to use for future requests
    if username or password:
        self.passwords[parsed.netloc] = (username, password)

    # Consume content and release the original connection to allow our new
    # request to reuse the same one.
    resp.content
    resp.raw.release_conn()

    # Add our new username and password to the request
    req = HTTPBasicAuth(username or "", password or "")(resp.request)

    # Send our new request
    new_resp = resp.connection.send(req, **kwargs)
    new_resp.history.append(resp)

    return new_resp
def _validate_secure_origin(self, logger, location):
    # Determine if this url used a secure transport mechanism
    parsed = urllib_parse.urlparse(str(location))
    origin = (parsed.scheme, parsed.hostname, parsed.port)

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in SECURE_ORIGINS + self.secure_origins:
        # Check to see if the protocol matches
        if origin[0] != secure_origin[0] and secure_origin[0] != "*":
            continue

        try:
            # We need to do this decode dance to ensure that we have a
            # unicode object, even on Python 2.x.
            addr = ipaddress.ip_address(
                origin[1]
                if (isinstance(origin[1], six.text_type) or
                    origin[1] is None)
                else origin[1].decode("utf8")
            )
            network = ipaddress.ip_network(
                secure_origin[1]
                if isinstance(secure_origin[1], six.text_type)
                else secure_origin[1].decode("utf8")
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if origin[1] != secure_origin[1] and secure_origin[1] != "*":
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches
        if (origin[2] != secure_origin[2] and
                secure_origin[2] != "*" and
                secure_origin[2] is not None):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should return True
        return True

    # If we've gotten to this point, then the origin isn't secure and we
    # will not accept it as a valid location to search. We will however
    # log a warning that we are ignoring it.
    logger.warning(
        "The repository located at %s is not a trusted or secure host and "
        "is being ignored. If this repository is available via HTTPS it "
        "is recommended to use HTTPS instead, otherwise you may silence "
        "this warning and allow it anyway with '--trusted-host %s'.",
        parsed.hostname,
        parsed.hostname,
    )

    return False
def _get_html_page(link, session=None):
    # type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
    if (scheme == 'file' and
            os.path.isdir(urllib_request.url2pathname(path))):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith('/'):
            url += '/'
        url = urllib_parse.urljoin(url, 'index.html')
        logger.debug(' file: URL is directory, getting %s', url)

    try:
        resp = _get_html_response(url, session=session)
    except _NotHTTP as exc:
        logger.debug(
            'Skipping page %s because it looks like an archive, and cannot '
            'be checked by HEAD.', link,
        )
    except _NotHTML as exc:
        logger.debug(
            'Skipping page %s because the %s request got Content-Type: %s',
            link, exc.request_desc, exc.content_type,
        )
    except requests.HTTPError as exc:
        _handle_get_page_fail(link, exc)
    except RetryError as exc:
        _handle_get_page_fail(link, exc)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: %s" % exc)
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out")
    else:
        return HTMLPage(resp.content, resp.url, resp.headers)

    return None
def _clean_link(url):
    # type: (str) -> str
    """Makes sure a link is fully encoded.  That is, if a ' ' shows up in
    the link, it will be rewritten to %20 (while not over-quoting
    % or other characters)."""
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`. Note that the
    # `netloc` can be empty and the URI will then refer to a local
    # filesystem path.
    result = urllib_parse.urlparse(url)
    # In both cases below we unquote prior to quoting to make sure
    # nothing is double quoted.
    if result.netloc == "":
        # On Windows the path part might contain a drive letter which
        # should not be quoted. On Linux where drive letters do not
        # exist, the colon should be quoted. We rely on urllib.request
        # to do the right thing here.
        path = urllib_request.pathname2url(
            urllib_request.url2pathname(result.path))
    else:
        path = urllib_parse.quote(urllib_parse.unquote(result.path))
    return urllib_parse.urlunparse(result._replace(path=path))
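# The same re-quoting idea as a self-contained standard-library sketch
# (an illustration, not the pip helper itself; Python 3 urllib assumed):
from urllib import parse, request

def clean_link(url):
    result = parse.urlparse(url)
    if result.netloc == "":
        # Local filesystem path: let urllib.request handle drive letters.
        path = request.pathname2url(request.url2pathname(result.path))
    else:
        # Unquote first so already-encoded characters are not double-quoted.
        path = parse.quote(parse.unquote(result.path))
    return parse.urlunparse(result._replace(path=path))

print(clean_link("https://example.com/some dir/pkg.tar.gz"))
# -> https://example.com/some%20dir/pkg.tar.gz
print(clean_link("https://example.com/some%20dir/pkg.tar.gz"))
# -> https://example.com/some%20dir/pkg.tar.gz  (no double quoting)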
def _get_html_page(link, session=None):
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url
    url = url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    from pip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            logger.debug('Cannot look at %s URL %s', scheme, link)
            return None

    try:
        filename = link.filename
        for bad_ext in ARCHIVE_EXTENSIONS:
            if filename.endswith(bad_ext):
                content_type = _get_content_type(url, session=session)
                if content_type.lower().startswith('text/html'):
                    break
                else:
                    logger.debug(
                        'Skipping page %s because of Content-Type: %s',
                        link,
                        content_type,
                    )
                    return

        logger.debug('Getting page %s', url)

        # Tack map.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urllib_parse.urlparse(url)
        if (scheme == 'file' and
                os.path.isdir(urllib_request.url2pathname(path))):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith('/'):
                url += '/'
            url = urllib_parse.urljoin(url, 'map.html')
            logger.debug(' file: URL is directory, getting %s', url)

        resp = session.get(
            url,
            headers={
                "Accept": "text/html",
                # We don't want to blindly return cached data for
                # /simple/, because authors generally expect that
                # twine upload && pip install will function, but if
                # they've done a pip install in the last ~10 minutes
                # it won't. Thus by setting this to zero we will not
                # blindly use any cached data, however the benefit of
                # using max-age=0 instead of no-cache, is that we will
                # still support conditional requests, so we will still
                # minimize traffic sent in cases where the page hasn't
                # changed at all, we will just always incur the round
                # trip for the conditional GET now instead of only
                # once per 10 minutes.
                # For more information, please see pypa/pip#5670.
                "Cache-Control": "max-age=0",
            },
        )
        resp.raise_for_status()

        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get('Content-Type', 'unknown')
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                'Skipping page %s because of Content-Type: %s',
                link,
                content_type,
            )
            return

        inst = HTMLPage(resp.content, resp.url, resp.headers)
    except requests.HTTPError as exc:
        _handle_get_page_fail(link, exc, url)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, url, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: %s" % exc, url)
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out", url)
    else:
        return inst
def get_page(cls, link, skip_archives=True, session=None):
    if session is None:
        raise TypeError(
            "get_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url
    url = url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    from pip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            logger.debug('Cannot look at %s URL %s', scheme, link)
            return None

    try:
        if skip_archives:
            filename = link.filename
            for bad_ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(bad_ext):
                    content_type = cls._get_content_type(
                        url, session=session,
                    )
                    if content_type.lower().startswith('text/html'):
                        break
                    else:
                        logger.debug(
                            'Skipping page %s because of Content-Type: %s',
                            link,
                            content_type,
                        )
                        return

        logger.debug('Getting page %s', url)

        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urllib_parse.urlparse(url)
        if (scheme == 'file' and
                os.path.isdir(urllib_request.url2pathname(path))):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith('/'):
                url += '/'
            url = urllib_parse.urljoin(url, 'index.html')
            logger.debug(' file: URL is directory, getting %s', url)

        resp = session.get(
            url,
            headers={
                "Accept": "text/html",
                "Cache-Control": "max-age=600",
            },
        )
        resp.raise_for_status()

        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get('Content-Type', 'unknown')
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                'Skipping page %s because of Content-Type: %s',
                link,
                content_type,
            )
            return

        inst = cls(resp.content, resp.url, resp.headers)
    except requests.HTTPError as exc:
        cls._handle_fail(link, exc, url)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        cls._handle_fail(link, reason, url, meth=logger.info)
    except requests.ConnectionError as exc:
        cls._handle_fail(link, "connection error: %s" % exc, url)
    except requests.Timeout:
        cls._handle_fail(link, "timed out", url)
    else:
        return inst
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
             trusted_hosts=None, process_dependency_links=False,
             session=None, format_control=None):
    """Create a PackageFinder.

    :param format_control: A FormatControl object or None. Used to control
        the selection of source packages / binary packages when consulting
        the index and links.
    """
    if session is None:
        raise TypeError(
            "PackageFinder() missing 1 required keyword argument: "
            "'session'"
        )

    # Build find_links. If an argument starts with ~, it may be
    # a local file relative to a home directory. So try normalizing
    # it and if it exists, use the normalized version.
    # This is deliberately conservative - it might be fine just to
    # blindly normalize anything starting with a ~...
    self.find_links = []
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        self.find_links.append(link)

    self.index_urls = index_urls
    self.dependency_links = []

    # These are boring links that have already been logged somehow:
    self.logged_links = set()

    self.format_control = format_control or FormatControl(set(), set())

    # Domains that we won't emit warnings for when not using HTTPS
    self.secure_origins = [
        ("*", host, "*")
        for host in (trusted_hosts if trusted_hosts else [])
    ]

    # Do we want to allow _all_ pre-releases?
    self.allow_all_prereleases = allow_all_prereleases

    # Do we process dependency links?
    self.process_dependency_links = process_dependency_links

    # The Session we'll use to make requests
    self.session = session

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
    if not HAS_TLS:
        for link in itertools.chain(self.index_urls, self.find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == "https":
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available."
                )
                break
def _validate_secure_origin(self, logger, location):
    # Determine if this url used a secure transport mechanism
    parsed = urllib_parse.urlparse(str(location))
    origin = (parsed.scheme, parsed.hostname, parsed.port)

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in (SECURE_ORIGINS + self.secure_origins):
        # Check to see if the protocol matches
        if origin[0] != secure_origin[0] and secure_origin[0] != "*":
            continue

        try:
            # We need to do this decode dance to ensure that we have a
            # unicode object, even on Python 2.x.
            addr = ipaddress.ip_address(
                origin[1]
                if (
                    isinstance(origin[1], six.text_type) or
                    origin[1] is None
                )
                else origin[1].decode("utf8")
            )
            network = ipaddress.ip_network(
                secure_origin[1]
                if isinstance(secure_origin[1], six.text_type)
                else secure_origin[1].decode("utf8")
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if origin[1] != secure_origin[1] and secure_origin[1] != "*":
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches
        if (origin[2] != secure_origin[2] and
                secure_origin[2] != "*" and
                secure_origin[2] is not None):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should break out of the loop and continue
        # on.
        break
    else:
        # If the loop successfully completed without a break, that means
        # that the origin we are testing is not a secure origin.
        logger.warning(
            "The repository located at %s is not a trusted host. If "
            "this repository is available via HTTPS it is recommended to "
            "use HTTPS instead, otherwise you may silence this warning "
            "with '--trusted-host %s'.",
            parsed.hostname,
            parsed.hostname,
        )

        warnings.warn(
            "Implicitly allowing locations which are not hosted at a "
            "secure origin is deprecated and will require the use of "
            "--trusted-host in the future.",
            RemovedInPip7Warning,
        )
def _link_package_versions(self, link, search):
    """Return an InstallationCandidate or None"""
    platform = get_platform()

    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            self._log_skipped_link(link, 'not a file')
            return
        if ext not in SUPPORTED_EXTENSIONS:
            self._log_skipped_link(
                link, 'unsupported archive format: %s' % ext)
            return
        if "binary" not in search.formats and ext == wheel_ext:
            self._log_skipped_link(
                link, 'No binaries permitted for %s' % search.supplied)
            return
        if "macosx10" in link.path and ext == '.zip':
            self._log_skipped_link(link, 'macosx10 one')
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                self._log_skipped_link(link, 'invalid wheel filename')
                return
            if (pkg_resources.safe_name(wheel.name).lower() !=
                    search.canonical):
                self._log_skipped_link(
                    link, 'wrong project name (not %s)' % search.supplied)
                return

            if not wheel.supported():
                self._log_skipped_link(
                    link, 'it is not compatible with this Python')
                return

            # This is a dirty hack to prevent installing Binary Wheels from
            # PyPI unless it is a Windows or Mac Binary Wheel. This is
            # paired with a change to PyPI disabling uploads for the
            # same. Once we have a mechanism for enabling support for
            # binary wheels on linux that deals with the inherent problems
            # of binary distribution this can be removed.
            comes_from = getattr(link, "comes_from", None)
            if (
                    (
                        not platform.startswith('win') and
                        not platform.startswith('macosx') and
                        not platform == 'cli'
                    ) and
                    comes_from is not None and
                    urllib_parse.urlparse(
                        comes_from.url
                    ).netloc.endswith(PyPI.netloc)):
                if not wheel.supported(tags=supported_tags_noarch):
                    self._log_skipped_link(
                        link,
                        "it is a pypi-hosted binary "
                        "Wheel on an unsupported platform",
                    )
                    return

            version = wheel.version

    # This should be up by the search.ok_binary check, but see issue 2700.
    if "source" not in search.formats and ext != wheel_ext:
        self._log_skipped_link(
            link, 'No sources permitted for %s' % search.supplied)
        return

    if not version:
        version = egg_info_matches(egg_info, search.supplied, link)
    if version is None:
        self._log_skipped_link(
            link, 'wrong project name (not %s)' % search.supplied)
        return

    if (link.internal is not None and
            not link.internal and
            not normalize_name(search.supplied).lower()
            in self.allow_external and
            not self.allow_all_external):
        # We have a link that we are sure is external, so we should skip
        # it unless we are allowing externals
        self._log_skipped_link(link, 'it is externally hosted')
        self.need_warn_external = True
        return

    if (link.verifiable is not None and
            not link.verifiable and
            not (normalize_name(search.supplied).lower()
                 in self.allow_unverified)):
        # We have a link that we are sure we cannot verify its integrity,
        # so we should skip it unless we are allowing unsafe installs
        # for this requirement.
        self._log_skipped_link(
            link, 'it is an insecure and unverifiable file')
        self.need_warn_unverified = True
        return

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            self._log_skipped_link(
                link, 'Python version is incorrect')
            return
    logger.debug('Found link %s, version: %s', link, version)

    return InstallationCandidate(search.supplied, version, link)
def is_secure_origin(self, location):
    # type: (Link) -> bool
    # Determine if this url used a secure transport mechanism
    parsed = urllib_parse.urlparse(str(location))
    origin_protocol, origin_host, origin_port = (
        parsed.scheme, parsed.hostname, parsed.port,
    )

    # The protocol to use to see if the protocol matches.
    # Don't count the repository type as part of the protocol: in
    # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
    # the last scheme.)
    origin_protocol = origin_protocol.rsplit('+', 1)[-1]

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in self.iter_secure_origins():
        secure_protocol, secure_host, secure_port = secure_origin
        if origin_protocol != secure_protocol and secure_protocol != "*":
            continue

        try:
            addr = ipaddress.ip_address(
                None
                if origin_host is None
                else six.ensure_text(origin_host)
            )
            network = ipaddress.ip_network(
                six.ensure_text(secure_host)
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if (
                origin_host and
                origin_host.lower() != secure_host.lower() and
                secure_host != "*"
            ):
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches.
        if (
            origin_port != secure_port and
            secure_port != "*" and
            secure_port is not None
        ):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should return True
        return True

    # If we've gotten to this point, then the origin isn't secure and we
    # will not accept it as a valid location to search. We will however
    # log a warning that we are ignoring it.
    logger.warning(
        "The repository located at %s is not a trusted or secure host and "
        "is being ignored. If this repository is available via HTTPS we "
        "recommend you use HTTPS instead, otherwise you may silence "
        "this warning and allow it anyway with '--trusted-host %s'.",
        origin_host,
        origin_host,
    )

    return False
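# A minimal standalone sketch (an illustration, not pip itself) of the
# origin-matching rule used above: an origin (protocol, host, port) matches a
# secure origin when each field matches exactly, the secure field is a "*"
# wildcard, or the host falls inside the secure origin's IP network.
import ipaddress

def matches(origin, secure):
    protocol, host, port = origin
    s_protocol, s_host, s_port = secure
    if protocol != s_protocol and s_protocol != "*":
        return False
    try:
        host_ok = ipaddress.ip_address(host) in ipaddress.ip_network(s_host)
    except ValueError:
        # Not an address/network pair, so compare hostnames instead.
        host_ok = s_host == "*" or host.lower() == s_host.lower()
    if not host_ok:
        return False
    return s_port is None or s_port == "*" or port == s_port

print(matches(("https", "example.com", 443), ("https", "*", "*")))      # True
print(matches(("http", "127.0.0.1", 8080), ("*", "127.0.0.0/8", "*")))  # True
print(matches(("http", "example.com", 80), ("https", "*", "*")))        # False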
def _link_package_versions(self, link, search_name):
    """
    Return an iterable of triples (pkg_resources_version_key,
    link, python_version) that can be extracted from the given
    link.

    Meant to be overridden by subclasses, not called by clients.
    """
    platform = get_platform()

    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
    else:
        egg_info, ext = link.splitext()
        if not ext:
            if link not in self.logged_links:
                logger.debug('Skipping link %s; not a file', link)
                self.logged_links.add(link)
            return
        if egg_info.endswith('.tar'):
            # Special double-extension case:
            egg_info = egg_info[:-4]
            ext = '.tar' + ext
        if ext not in self._known_extensions():
            if link not in self.logged_links:
                logger.debug(
                    'Skipping link %s; unknown archive format: %s',
                    link,
                    ext,
                )
                self.logged_links.add(link)
            return
        if "macosx10" in link.path and ext == '.zip':
            if link not in self.logged_links:
                logger.debug('Skipping link %s; macosx10 one', link)
                self.logged_links.add(link)
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                logger.debug(
                    'Skipping %s because the wheel filename is invalid',
                    link)
                return
            if (pkg_resources.safe_name(wheel.name).lower() !=
                    pkg_resources.safe_name(search_name).lower()):
                logger.debug(
                    'Skipping link %s; wrong project name (not %s)',
                    link,
                    search_name,
                )
                return
            if not wheel.supported():
                logger.debug(
                    'Skipping %s because it is not compatible with this '
                    'Python',
                    link,
                )
                return
            # This is a dirty hack to prevent installing Binary Wheels from
            # PyPI unless it is a Windows or Mac Binary Wheel. This is
            # paired with a change to PyPI disabling uploads for the
            # same. Once we have a mechanism for enabling support for
            # binary wheels on linux that deals with the inherent problems
            # of binary distribution this can be removed.
            comes_from = getattr(link, "comes_from", None)
            if ((not platform.startswith('win') and
                    not platform.startswith('macosx') and
                    not platform == 'cli') and
                    comes_from is not None and
                    urllib_parse.urlparse(
                        comes_from.url).netloc.endswith(PyPI.netloc)):
                if not wheel.supported(tags=supported_tags_noarch):
                    logger.debug(
                        "Skipping %s because it is a pypi-hosted binary "
                        "Wheel on an unsupported platform",
                        link,
                    )
                    return
            version = wheel.version

    if not version:
        version = self._egg_info_matches(egg_info, search_name, link)
    if version is None:
        logger.debug(
            'Skipping link %s; wrong project name (not %s)',
            link,
            search_name,
        )
        return

    if (link.internal is not None and
            not link.internal and
            not normalize_name(search_name).lower()
            in self.allow_external and
            not self.allow_all_external):
        # We have a link that we are sure is external, so we should skip
        # it unless we are allowing externals
        logger.debug("Skipping %s because it is externally hosted.", link)
        self.need_warn_external = True
        return

    if (link.verifiable is not None and
            not link.verifiable and
            not (normalize_name(search_name).lower()
                 in self.allow_unverified)):
        # We have a link that we are sure we cannot verify its integrity,
        # so we should skip it unless we are allowing unsafe installs
        # for this requirement.
        logger.debug(
            "Skipping %s because it is an insecure and unverifiable file.",
            link,
        )
        self.need_warn_unverified = True
        return

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            logger.debug(
                'Skipping %s because Python version is incorrect', link)
            return
    logger.debug('Found link %s, version: %s', link, version)

    return InstallationCandidate(search_name, version, link)
url = link.url.split('#', 1)[0]

# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
<<<<<<< HEAD
    logger.warning('Cannot look at %s URL %s because it does not support '
                   'lookup as web pages.', vcs_scheme, link)
=======
    logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
>>>>>>> b66a76afa15ab74019740676a52a071b85ed8f71
    return None

# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and
        os.path.isdir(urllib_request.url2pathname(path))):
    # add trailing slash if not present so urljoin doesn't trim
    # final segment
    if not url.endswith('/'):
        url += '/'
    url = urllib_parse.urljoin(url, 'index.html')
    logger.debug(' file: URL is directory, getting %s', url)

try:
    resp = _get_html_response(url, session=session)
except _NotHTTP:
<<<<<<< HEAD
    logger.warning(
        'Skipping page %s because it looks like an archive, and cannot '
        'be checked by a HTTP HEAD request.', link,
def _link_package_versions(self, link, search):
    """Return an InstallationCandidate or None"""
    platform = get_platform()

    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            self._log_skipped_link(link, 'not a file')
            return
        if ext not in SUPPORTED_EXTENSIONS:
            self._log_skipped_link(
                link, 'unsupported archive format: %s' % ext)
            return
        if "binary" not in search.formats and ext == wheel_ext:
            self._log_skipped_link(
                link, 'No binaries permitted for %s' % search.supplied)
            return
        if "macosx10" in link.path and ext == '.zip':
            self._log_skipped_link(link, 'macosx10 one')
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                self._log_skipped_link(link, 'invalid wheel filename')
                return
            if (pkg_resources.safe_name(wheel.name).lower() !=
                    search.canonical):
                self._log_skipped_link(
                    link, 'wrong project name (not %s)' % search.supplied)
                return

            if not wheel.supported():
                self._log_skipped_link(
                    link, 'it is not compatible with this Python')
                return

            # This is a dirty hack to prevent installing Binary Wheels from
            # PyPI unless it is a Windows or Mac Binary Wheel. This is
            # paired with a change to PyPI disabling uploads for the
            # same. Once we have a mechanism for enabling support for
            # binary wheels on linux that deals with the inherent problems
            # of binary distribution this can be removed.
            comes_from = getattr(link, "comes_from", None)
            if ((not platform.startswith('win') and
                    not platform.startswith('macosx') and
                    not platform == 'cli') and
                    comes_from is not None and
                    urllib_parse.urlparse(
                        comes_from.url).netloc.endswith(PyPI.netloc)):
                if not wheel.supported(tags=supported_tags_noarch):
                    self._log_skipped_link(
                        link,
                        "it is a pypi-hosted binary "
                        "Wheel on an unsupported platform",
                    )
                    return

            version = wheel.version

    # This should be up by the search.ok_binary check, but see issue 2700.
    if "source" not in search.formats and ext != wheel_ext:
        self._log_skipped_link(
            link, 'No sources permitted for %s' % search.supplied)
        return

    if not version:
        version = egg_info_matches(egg_info, search.supplied, link)
    if version is None:
        self._log_skipped_link(
            link, 'wrong project name (not %s)' % search.supplied)
        return

    if (link.internal is not None and
            not link.internal and
            not normalize_name(search.supplied).lower()
            in self.allow_external and
            not self.allow_all_external):
        # We have a link that we are sure is external, so we should skip
        # it unless we are allowing externals
        self._log_skipped_link(link, 'it is externally hosted')
        self.need_warn_external = True
        return

    if (link.verifiable is not None and
            not link.verifiable and
            not (normalize_name(search.supplied).lower()
                 in self.allow_unverified)):
        # We have a link that we are sure we cannot verify its integrity,
        # so we should skip it unless we are allowing unsafe installs
        # for this requirement.
        self._log_skipped_link(
            link, 'it is an insecure and unverifiable file')
        self.need_warn_unverified = True
        return

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            self._log_skipped_link(link, 'Python version is incorrect')
            return
    logger.debug('Found link %s, version: %s', link, version)

    return InstallationCandidate(search.supplied, version, link)
def __init__(self, find_links, index_urls, allow_external=(),
             allow_unverified=(), allow_all_external=False,
             allow_all_prereleases=False, trusted_hosts=None,
             process_dependency_links=False, session=None,
             format_control=None):
    """Create a PackageFinder.

    :param format_control: A FormatControl object or None. Used to control
        the selection of source packages / binary packages when consulting
        the index and links.
    """
    if session is None:
        raise TypeError(
            "PackageFinder() missing 1 required keyword argument: "
            "'session'")

    # Build find_links. If an argument starts with ~, it may be
    # a local file relative to a home directory. So try normalizing
    # it and if it exists, use the normalized version.
    # This is deliberately conservative - it might be fine just to
    # blindly normalize anything starting with a ~...
    self.find_links = []
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        self.find_links.append(link)

    self.index_urls = index_urls
    self.dependency_links = []

    # These are boring links that have already been logged somehow:
    self.logged_links = set()

    self.format_control = format_control or FormatControl(set(), set())

    # Do we allow (safe and verifiable) externally hosted files?
    self.allow_external = set(normalize_name(n) for n in allow_external)

    # Which names are allowed to install insecure and unverifiable files?
    self.allow_unverified = set(
        normalize_name(n) for n in allow_unverified)

    # Anything that is allowed unverified is also allowed external
    self.allow_external |= self.allow_unverified

    # Do we allow all (safe and verifiable) externally hosted files?
    self.allow_all_external = allow_all_external

    # Domains that we won't emit warnings for when not using HTTPS
    self.secure_origins = [
        ("*", host, "*")
        for host in (trusted_hosts if trusted_hosts else [])
    ]

    # Stores if we ignored any external links so that we can instruct
    # end users how to install them if no distributions are available
    self.need_warn_external = False

    # Stores if we ignored any unsafe links so that we can instruct
    # end users how to install them if no distributions are available
    self.need_warn_unverified = False

    # Do we want to allow _all_ pre-releases?
    self.allow_all_prereleases = allow_all_prereleases

    # Do we process dependency links?
    self.process_dependency_links = process_dependency_links

    # The Session we'll use to make requests
    self.session = session

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
    if not HAS_TLS:
        for link in itertools.chain(self.index_urls, self.find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == "https":
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available.")
                break
def __init__(self, index_url, session, use_datetime=False):
    # type: (str, PipSession, bool) -> None
    xmlrpc_client.Transport.__init__(self, use_datetime)

    index_parts = urllib_parse.urlparse(index_url)
    self._scheme = index_parts.scheme
    self._session = session
def _validate_secure_origin(self, logger, location):
    # type: (Logger, Link) -> bool
    # Determine if this url used a secure transport mechanism
    parsed = urllib_parse.urlparse(str(location))
    origin = (parsed.scheme, parsed.hostname, parsed.port)

    # The protocol to use to see if the protocol matches.
    # Don't count the repository type as part of the protocol: in
    # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
    # the last scheme.)
    protocol = origin[0].rsplit('+', 1)[-1]

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in (SECURE_ORIGINS + self.secure_origins):
        if protocol != secure_origin[0] and secure_origin[0] != "*":
            continue

        try:
            # We need to do this decode dance to ensure that we have a
            # unicode object, even on Python 2.x.
            addr = ipaddress.ip_address(
                origin[1]
                if (
                    isinstance(origin[1], six.text_type) or
                    origin[1] is None
                )
                else origin[1].decode("utf8")
            )
            network = ipaddress.ip_network(
                secure_origin[1]
                if isinstance(secure_origin[1], six.text_type)
                # setting secure_origin[1] to proper Union[bytes, str]
                # creates problems in other places
                else secure_origin[1].decode("utf8")  # type: ignore
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if (origin[1] and
                    origin[1].lower() != secure_origin[1].lower() and
                    secure_origin[1] != "*"):
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches
        if (origin[2] != secure_origin[2] and
                secure_origin[2] != "*" and
                secure_origin[2] is not None):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should return True
        return True

    # If we've gotten to this point, then the origin isn't secure and we
    # will not accept it as a valid location to search. We will however
    # log a warning that we are ignoring it.
    logger.warning(
        "The repository located at %s is not a trusted or secure host and "
        "is being ignored. If this repository is available via HTTPS we "
        "recommend you use HTTPS instead, otherwise you may silence "
        "this warning and allow it anyway with '--trusted-host %s'.",
        parsed.hostname,
        parsed.hostname,
    )

    return False
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
             trusted_hosts=None, process_dependency_links=False,
             session=None, format_control=None, platform=None,
             versions=None, abi=None, implementation=None):
    """Create a PackageFinder.

    :param format_control: A FormatControl object or None. Used to control
        the selection of source packages / binary packages when consulting
        the index and links.
    :param platform: A string or None. If None, searches for packages
        that are supported by the current system. Otherwise, will find
        packages that can be built on the platform passed in. These
        packages will only be downloaded for distribution: they will
        not be built locally.
    :param versions: A list of strings or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param abi: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param implementation: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    """
    if session is None:
        raise TypeError(
            "PackageFinder() missing 1 required keyword argument: "
            "'session'")

    # Build find_links. If an argument starts with ~, it may be
    # a local file relative to a home directory. So try normalizing
    # it and if it exists, use the normalized version.
    # This is deliberately conservative - it might be fine just to
    # blindly normalize anything starting with a ~...
    self.find_links = []
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        self.find_links.append(link)

    self.index_urls = index_urls
    self.dependency_links = []

    # These are boring links that have already been logged somehow:
    self.logged_links = set()

    self.format_control = format_control or FormatControl(set(), set())

    # Domains that we won't emit warnings for when not using HTTPS
    self.secure_origins = [
        ("*", host, "*")
        for host in (trusted_hosts if trusted_hosts else [])
    ]

    # Do we want to allow _all_ pre-releases?
    self.allow_all_prereleases = allow_all_prereleases

    # Do we process dependency links?
    self.process_dependency_links = process_dependency_links

    # The Session we'll use to make requests
    self.session = session

    # The valid tags to check potential found wheel candidates against
    self.valid_tags = get_supported(
        versions=versions,
        platform=platform,
        abi=abi,
        impl=implementation,
    )

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
    if not HAS_TLS:
        for link in itertools.chain(self.index_urls, self.find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == "https":
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available.")
                break
def get_page(cls, link, skip_archives=True, session=None):
    if session is None:
        raise TypeError(
            "get_page() missing 1 required keyword argument: 'session'")

    url = link.url
    url = url.split("#", 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    from pip.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
            logger.debug("Cannot look at %s URL %s", scheme, link)
            return None

    try:
        if skip_archives:
            filename = link.filename
            for bad_ext in [".tar", ".tar.gz", ".tar.bz2", ".tgz", ".zip"]:
                if filename.endswith(bad_ext):
                    content_type = cls._get_content_type(
                        url, session=session)
                    if content_type.lower().startswith("text/html"):
                        break
                    else:
                        logger.debug(
                            "Skipping page %s because of Content-Type: %s",
                            link,
                            content_type,
                        )
                        return

        logger.debug("Getting page %s", url)

        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urllib_parse.urlparse(url)
        if (scheme == "file" and
                os.path.isdir(urllib_request.url2pathname(path))):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith("/"):
                url += "/"
            url = urllib_parse.urljoin(url, "index.html")
            logger.debug(" file: URL is directory, getting %s", url)

        resp = session.get(
            url,
            headers={"Accept": "text/html", "Cache-Control": "max-age=600"},
        )
        resp.raise_for_status()

        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get("Content-Type", "unknown")
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                "Skipping page %s because of Content-Type: %s",
                link,
                content_type,
            )
            return

        inst = cls(resp.content, resp.url, resp.headers, trusted=link.trusted)
    except requests.HTTPError as exc:
        level = 2 if exc.response.status_code == 404 else 1
        cls._handle_fail(link, exc, url, level=level)
    except requests.ConnectionError as exc:
        cls._handle_fail(link, "connection error: %s" % exc, url)
    except requests.Timeout:
        cls._handle_fail(link, "timed out", url)
    except SSLError as exc:
        reason = ("There was a problem confirming the ssl certificate: "
                  "%s" % exc)
        cls._handle_fail(link, reason, url, level=2, meth=logger.info)
    else:
        return inst
def _validate_secure_origin(self, logger, location):
    # Determine if this url used a secure transport mechanism
    parsed = urllib_parse.urlparse(str(location))
    origin = (parsed.scheme, parsed.hostname, parsed.port)

    # The protocol to use to see if the protocol matches.
    # Don't count the repository type as part of the protocol: in
    # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
    # the last scheme.)
    protocol = origin[0].rsplit('+', 1)[-1]

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in (SECURE_ORIGINS + self.secure_origins):
        if protocol != secure_origin[0] and secure_origin[0] != "*":
            continue

        try:
            # We need to do this decode dance to ensure that we have a
            # unicode object, even on Python 2.x.
            addr = ipaddress.ip_address(
                origin[1]
                if (
                    isinstance(origin[1], six.text_type) or
                    origin[1] is None
                )
                else origin[1].decode("utf8")
            )
            network = ipaddress.ip_network(
                secure_origin[1]
                if isinstance(secure_origin[1], six.text_type)
                else secure_origin[1].decode("utf8")
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if (origin[1] and
                    origin[1].lower() != secure_origin[1].lower() and
                    secure_origin[1] != "*"):
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches
        if (origin[2] != secure_origin[2] and
                secure_origin[2] != "*" and
                secure_origin[2] is not None):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should return True
        return True

    # If we've gotten to this point, then the origin isn't secure and we
    # will not accept it as a valid location to search. We will however
    # log a warning that we are ignoring it.
    logger.warning(
        "The repository located at %s is not a trusted or secure host and "
        "is being ignored. If this repository is available via HTTPS we "
        "recommend you use HTTPS instead, otherwise you may silence "
        "this warning and allow it anyway with '--trusted-host %s'.",
        parsed.hostname,
        parsed.hostname,
    )

    return False
def _get_html_page(link, session=None):
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url
    url = url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    from pip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            logger.debug('Cannot look at %s URL %s', scheme, link)
            return None

    try:
        filename = link.filename
        for bad_ext in ARCHIVE_EXTENSIONS:
            if filename.endswith(bad_ext):
                content_type = _get_content_type(url, session=session)
                if content_type.lower().startswith('text/html'):
                    break
                else:
                    logger.debug(
                        'Skipping page %s because of Content-Type: %s',
                        link,
                        content_type,
                    )
                    return

        logger.debug('Getting page %s', url)

        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urllib_parse.urlparse(url)
        if (scheme == 'file' and
                os.path.isdir(urllib_request.url2pathname(path))):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith('/'):
                url += '/'
            url = urllib_parse.urljoin(url, 'index.html')
            logger.debug(' file: URL is directory, getting %s', url)

        resp = session.get(
            url,
            headers={
                "Accept": "text/html",
                # We don't want to blindly return cached data for
                # /simple/, because authors generally expect that
                # twine upload && pip install will function, but if
                # they've done a pip install in the last ~10 minutes
                # it won't. Thus by setting this to zero we will not
                # blindly use any cached data, however the benefit of
                # using max-age=0 instead of no-cache, is that we will
                # still support conditional requests, so we will still
                # minimize traffic sent in cases where the page hasn't
                # changed at all, we will just always incur the round
                # trip for the conditional GET now instead of only
                # once per 10 minutes.
                # For more information, please see pypa/pip#5670.
                "Cache-Control": "max-age=0",
            },
        )
        resp.raise_for_status()

        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get('Content-Type', 'unknown')
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                'Skipping page %s because of Content-Type: %s',
                link,
                content_type,
            )
            return

        inst = HTMLPage(resp.content, resp.url, resp.headers)
    except requests.HTTPError as exc:
        _handle_get_page_fail(link, exc, url)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, url, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: %s" % exc, url)
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out", url)
    else:
        return inst
def _link_package_versions(self, link, search):
    """Return an InstallationCandidate or None"""
    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            self._log_skipped_link(link, 'not a file')
            return
        if ext not in SUPPORTED_EXTENSIONS:
            self._log_skipped_link(
                link, 'unsupported archive format: %s' % ext)
            return
        if (("binary" not in search.formats and ext == wheel_ext) and
                (urllib_parse.urlparse(link.url).scheme != 'file' or
                 "binary_local" not in search.formats)):
            self._log_skipped_link(
                link, 'No binaries permitted for %s' % search.supplied)
            return
        if "macosx10" in link.path and ext == '.zip':
            self._log_skipped_link(link, 'macosx10 one')
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                self._log_skipped_link(link, 'invalid wheel filename')
                return
            if canonicalize_name(wheel.name) != search.canonical:
                self._log_skipped_link(
                    link, 'wrong project name (not %s)' % search.supplied)
                return

            if not wheel.supported(self.valid_tags):
                self._log_skipped_link(
                    link, 'it is not compatible with this Python')
                return

            version = wheel.version

    # This should be up by the search.ok_binary check, but see issue 2700.
    if "source" not in search.formats and ext != wheel_ext:
        self._log_skipped_link(
            link, 'No sources permitted for %s' % search.supplied)
        return

    if not version:
        version = egg_info_matches(egg_info, search.supplied, link)
    if version is None:
        self._log_skipped_link(
            link, 'wrong project name (not %s)' % search.supplied)
        return

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            self._log_skipped_link(link, 'Python version is incorrect')
            return

    try:
        support_this_python = check_requires_python(link.requires_python)
    except specifiers.InvalidSpecifier:
        logger.debug("Package %s has an invalid Requires-Python entry: %s",
                     link.filename, link.requires_python)
        support_this_python = True

    if not support_this_python:
        logger.debug(
            "The package %s is incompatible with the python "
            "version in use. Acceptable python versions are: %s",
            link, link.requires_python)
        return

    logger.debug('Found link %s, version: %s', link, version)

    return InstallationCandidate(search.supplied, version, link)
def _link_package_versions(self, link, search_name):
    """
    Return an iterable of triples (pkg_resources_version_key,
    link, python_version) that can be extracted from the given
    link.

    Meant to be overridden by subclasses, not called by clients.
    """
    platform = get_platform()

    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
    else:
        egg_info, ext = link.splitext()
        if not ext:
            if link not in self.logged_links:
                logger.debug('Skipping link %s; not a file', link)
                self.logged_links.add(link)
            return
        if egg_info.endswith('.tar'):
            # Special double-extension case:
            egg_info = egg_info[:-4]
            ext = '.tar' + ext
        if ext not in self._known_extensions():
            if link not in self.logged_links:
                logger.debug(
                    'Skipping link %s; unknown archive format: %s',
                    link,
                    ext,
                )
                self.logged_links.add(link)
            return
        if "macosx10" in link.path and ext == '.zip':
            if link not in self.logged_links:
                logger.debug('Skipping link %s; macosx10 one', link)
                self.logged_links.add(link)
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                logger.debug(
                    'Skipping %s because the wheel filename is invalid',
                    link
                )
                return
            if (pkg_resources.safe_name(wheel.name).lower() !=
                    pkg_resources.safe_name(search_name).lower()):
                logger.debug(
                    'Skipping link %s; wrong project name (not %s)',
                    link,
                    search_name,
                )
                return
            if not wheel.supported():
                logger.debug(
                    'Skipping %s because it is not compatible with this '
                    'Python',
                    link,
                )
                return
            # This is a dirty hack to prevent installing Binary Wheels from
            # PyPI unless it is a Windows or Mac Binary Wheel. This is
            # paired with a change to PyPI disabling uploads for the
            # same. Once we have a mechanism for enabling support for
            # binary wheels on linux that deals with the inherent problems
            # of binary distribution this can be removed.
            comes_from = getattr(link, "comes_from", None)
            if (
                    (
                        not platform.startswith('win') and
                        not platform.startswith('macosx') and
                        not platform == 'cli'
                    ) and
                    comes_from is not None and
                    urllib_parse.urlparse(
                        comes_from.url
                    ).netloc.endswith(PyPI.netloc)):
                if not wheel.supported(tags=supported_tags_noarch):
                    logger.debug(
                        "Skipping %s because it is a pypi-hosted binary "
                        "Wheel on an unsupported platform",
                        link,
                    )
                    return
            version = wheel.version

    if not version:
        version = self._egg_info_matches(egg_info, search_name, link)
    if version is None:
        logger.debug(
            'Skipping link %s; wrong project name (not %s)',
            link,
            search_name,
        )
        return

    if (link.internal is not None and
            not link.internal and
            not normalize_name(search_name).lower()
            in self.allow_external and
            not self.allow_all_external):
        # We have a link that we are sure is external, so we should skip
        # it unless we are allowing externals
        logger.debug("Skipping %s because it is externally hosted.", link)
        self.need_warn_external = True
        return

    if (link.verifiable is not None and
            not link.verifiable and
            not (normalize_name(search_name).lower()
                 in self.allow_unverified)):
        # We have a link that we are sure we cannot verify its integrity,
        # so we should skip it unless we are allowing unsafe installs
        # for this requirement.
        logger.debug(
            "Skipping %s because it is an insecure and unverifiable file.",
            link,
        )
        self.need_warn_unverified = True
        return

    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            logger.debug(
                'Skipping %s because Python version is incorrect', link
            )
            return
    logger.debug('Found link %s, version: %s', link, version)

    return InstallationCandidate(search_name, version, link)
def get_page(cls, link, skip_archives=True, session=None):
    if session is None:
        raise TypeError(
            "get_page() missing 1 required keyword argument: 'session'")

    url = link.url
    url = url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    from pip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            logger.debug('Cannot look at %s URL %s', scheme, link)
            return None

    try:
        if skip_archives:
            filename = link.filename
            for bad_ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(bad_ext):
                    content_type = cls._get_content_type(
                        url, session=session,
                    )
                    if content_type.lower().startswith('text/html'):
                        break
                    else:
                        logger.debug(
                            'Skipping page %s because of Content-Type: %s',
                            link,
                            content_type,
                        )
                        return

        logger.debug('Getting page %s', url)

        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urllib_parse.urlparse(url)
        if (scheme == 'file' and
                os.path.isdir(urllib_request.url2pathname(path))):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith('/'):
                url += '/'
            url = urllib_parse.urljoin(url, 'index.html')
            logger.debug(' file: URL is directory, getting %s', url)

        resp = session.get(
            url,
            headers={
                "Accept": "text/html",
                "Cache-Control": "max-age=600",
            },
        )
        resp.raise_for_status()

        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get('Content-Type', 'unknown')
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                'Skipping page %s because of Content-Type: %s',
                link,
                content_type,
            )
            return

        inst = cls(resp.content, resp.url, resp.headers)
    except requests.HTTPError as exc:
        cls._handle_fail(link, exc, url)
    except SSLError as exc:
        reason = ("There was a problem confirming the ssl certificate: "
                  "%s" % exc)
        cls._handle_fail(link, reason, url, meth=logger.info)
    except requests.ConnectionError as exc:
        cls._handle_fail(link, "connection error: %s" % exc, url)
    except requests.Timeout:
        cls._handle_fail(link, "timed out", url)
    else:
        return inst
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
             trusted_hosts=None, process_dependency_links=False,
             session=None, format_control=None, platform=None,
             versions=None, abi=None, implementation=None):
    """Create a PackageFinder.

    :param format_control: A FormatControl object or None. Used to control
        the selection of source packages / binary packages when consulting
        the index and links.
    :param platform: A string or None. If None, searches for packages
        that are supported by the current system. Otherwise, will find
        packages that can be built on the platform passed in. These
        packages will only be downloaded for distribution: they will
        not be built locally.
    :param versions: A list of strings or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param abi: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param implementation: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    """
    if session is None:
        raise TypeError(
            "PackageFinder() missing 1 required keyword argument: "
            "'session'"
        )

    # Build find_links. If an argument starts with ~, it may be
    # a local file relative to a home directory. So try normalizing
    # it and if it exists, use the normalized version.
    # This is deliberately conservative - it might be fine just to
    # blindly normalize anything starting with a ~...
    self.find_links = []
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        self.find_links.append(link)

    self.index_urls = index_urls
    self.dependency_links = []

    # These are boring links that have already been logged somehow:
    self.logged_links = set()

    self.format_control = format_control or FormatControl(set(), set())

    # Domains that we won't emit warnings for when not using HTTPS
    self.secure_origins = [
        ("*", host, "*")
        for host in (trusted_hosts if trusted_hosts else [])
    ]

    # Do we want to allow _all_ pre-releases?
    self.allow_all_prereleases = allow_all_prereleases

    # Do we process dependency links?
    self.process_dependency_links = process_dependency_links

    # The Session we'll use to make requests
    self.session = session

    # The valid tags to check potential found wheel candidates against
    self.valid_tags = get_supported(
        versions=versions,
        platform=platform,
        abi=abi,
        impl=implementation,
    )

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
    if not HAS_TLS:
        for link in itertools.chain(self.index_urls, self.find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == "https":
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available."
                )
                break
def _get_html_page(link, session=None):
    # type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url.split("#", 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning(
            "Cannot look at %s URL %s because it does not support "
            "lookup as web pages.", vcs_scheme, link,
        )
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
    if scheme == "file" and os.path.isdir(urllib_request.url2pathname(path)):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith("/"):
            url += "/"
        url = urllib_parse.urljoin(url, "index.html")
        logger.debug(" file: URL is directory, getting %s", url)

    try:
        resp = _get_html_response(url, session=session)
    except _NotHTTP:
        logger.warning(
            "Skipping page %s because it looks like an archive, and cannot "
            "be checked by a HTTP HEAD request.", link,
        )
    except _NotHTML as exc:
        logger.warning(
            "Skipping page %s because the %s request got Content-Type: %s. "
            "The only supported Content-Type is text/html",
            link, exc.request_desc, exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_page_fail(link, exc)
    except RetryError as exc:
        _handle_get_page_fail(link, exc)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: {}".format(exc))
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out")
    else:
        return _make_html_page(resp,
                               cache_link_parsing=link.cache_link_parsing)
    return None
def __init__(self, index_url, session, use_datetime=False):
    xmlrpc_client.Transport.__init__(self, use_datetime)

    index_parts = urllib_parse.urlparse(index_url)
    self._scheme = index_parts.scheme
    self._session = session
def create(
    cls,
    find_links,  # type: List[str]
    index_urls,  # type: List[str]
    allow_all_prereleases=False,  # type: bool
    trusted_hosts=None,  # type: Optional[Iterable[str]]
    session=None,  # type: Optional[PipSession]
    format_control=None,  # type: Optional[FormatControl]
    platform=None,  # type: Optional[str]
    versions=None,  # type: Optional[List[str]]
    abi=None,  # type: Optional[str]
    implementation=None,  # type: Optional[str]
    prefer_binary=False  # type: bool
):
    # type: (...) -> PackageFinder
    """Create a PackageFinder.

    :param trusted_hosts: Domains that we won't emit warnings for when
        not using HTTPS.
    :param session: The Session to use to make requests.
    :param format_control: A FormatControl object or None. Used to control
        the selection of source packages / binary packages when consulting
        the index and links.
    :param platform: A string or None. If None, searches for packages
        that are supported by the current system. Otherwise, will find
        packages that can be built on the platform passed in. These
        packages will only be downloaded for distribution: they will
        not be built locally.
    :param versions: A list of strings or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param abi: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param implementation: A string or None. This is passed directly
        to pep425tags.py in the get_supported() method.
    :param prefer_binary: Whether to prefer an old, but valid, binary
        dist over a new source dist.
    """
    if session is None:
        raise TypeError(
            "PackageFinder.create() missing 1 required keyword argument: "
            "'session'")

    # Build find_links. If an argument starts with ~, it may be
    # a local file relative to a home directory. So try normalizing
    # it and if it exists, use the normalized version.
    # This is deliberately conservative - it might be fine just to
    # blindly normalize anything starting with a ~...
    built_find_links = []  # type: List[str]
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        built_find_links.append(link)

    secure_origins = [
        ("*", host, "*")
        for host in (trusted_hosts if trusted_hosts else [])
    ]  # type: List[SecureOrigin]

    # The valid tags to check potential found wheel candidates against
    valid_tags = get_supported(
        versions=versions,
        platform=platform,
        abi=abi,
        impl=implementation,
    )

    candidate_evaluator = CandidateEvaluator(
        valid_tags=valid_tags, prefer_binary=prefer_binary,
    )

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
    if not HAS_TLS:
        for link in itertools.chain(index_urls, built_find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == "https":
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available.")
                break

    return cls(
        candidate_evaluator=candidate_evaluator,
        find_links=built_find_links,
        index_urls=index_urls,
        secure_origins=secure_origins,
        session=session,
        allow_all_prereleases=allow_all_prereleases,
        format_control=format_control,
    )
    for link in find_links:
        if link.startswith('~'):
            new_link = normalize_path(link)
            if os.path.exists(new_link):
                link = new_link
        built_find_links.append(link)

    # If we don't have TLS enabled, then WARN if anyplace we're looking
    # relies on TLS.
<<<<<<< HEAD
    if not HAS_TLS:
=======
    if not has_tls():
>>>>>>> development
        for link in itertools.chain(index_urls, built_find_links):
            parsed = urllib_parse.urlparse(link)
            if parsed.scheme == 'https':
                logger.warning(
                    'pip is configured with locations that require '
                    'TLS/SSL, however the ssl module in Python is not '
                    'available.'
                )
                break

    return cls(
        find_links=built_find_links,
        index_urls=index_urls,
    )

def __init__(
    self,