Example #1
    def get_formatted_locations(self) -> str:
        lines = []
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:

                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib.parse.urlsplit(redacted_index_url)

                # A URL is generally invalid if both scheme and netloc are
                # missing. Python's URL parsing has quirks, so this test is
                # a bit crude: urlsplit() does not reliably reject malformed
                # URLs. See bpo-20271 and bpo-23505.
                if not purl.scheme and not purl.netloc:
                    logger.warning(
                        'The index url "%s" seems invalid, please provide a scheme.',
                        redacted_index_url,
                    )

                redacted_index_urls.append(redacted_index_url)

            lines.append("Looking in indexes: {}".format(
                ", ".join(redacted_index_urls)))

        if self.find_links:
            lines.append("Looking in links: {}".format(", ".join(
                redact_auth_from_url(url) for url in self.find_links)))
        return "\n".join(lines)
Example #2
File: link.py  Project: stjordanis/pipenv
 def __str__(self) -> str:
     if self.requires_python:
         rp = f' (requires-python:{self.requires_python})'
     else:
         rp = ''
     if self.comes_from:
         return '{} (from {}){}'.format(
             redact_auth_from_url(self._url), self.comes_from, rp)
     else:
         return redact_auth_from_url(str(self._url))
Example #3
 def __str__(self):
     if self.requires_python:
         rp = ' (requires-python:%s)' % self.requires_python
     else:
         rp = ''
     if self.comes_from:
         return '%s (from %s)%s' % (redact_auth_from_url(
             self._url), self.comes_from, rp)
     else:
         return redact_auth_from_url(str(self._url))
Example #4
 def get_formatted_locations(self):
     # type: () -> str
     lines = []
     if self.index_urls and self.index_urls != [PyPI.simple_url]:
         lines.append('Looking in indexes: {}'.format(', '.join(
             redact_auth_from_url(url) for url in self.index_urls)))
     if self.find_links:
         lines.append('Looking in links: {}'.format(', '.join(
             redact_auth_from_url(url) for url in self.find_links)))
     return '\n'.join(lines)
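
For a sense of the output, here is a hypothetical call; the SearchScope constructor arguments shown are assumed, not taken from these snippets:

scope = SearchScope(
    find_links=["https://alice:hunter2@mirror.example.com/links"],
    index_urls=["https://alice:hunter2@pypi.example.com/simple"],
)
print(scope.get_formatted_locations())
# Looking in indexes: https://alice:****@pypi.example.com/simple
# Looking in links: https://alice:****@mirror.example.com/links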
Example #5
 def __str__(self) -> str:
     if self.requires_python:
         rp = f" (requires-python:{self.requires_python})"
     else:
         rp = ""
     if self.comes_from:
         return "{} (from {}){}".format(redact_auth_from_url(self._url),
                                        self.comes_from, rp)
     else:
         return redact_auth_from_url(str(self._url))
Example #6
    def create(
        cls,
        session: PipSession,
        options: Values,
        suppress_no_index: bool = False,
        index_lookup: Optional[Dict[str, List[str]]] = None,
    ) -> "LinkCollector":
        """
        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            logger.debug(
                "Ignoring indexes: %s",
                ",".join(redact_auth_from_url(url) for url in index_urls),
            )
            index_urls = []

        # Make sure find_links is a list before passing to create().
        find_links = options.find_links or []

        search_scope = SearchScope.create(find_links=find_links,
                                          index_urls=index_urls,
                                          index_lookup=index_lookup)
        link_collector = LinkCollector(session=session,
                                       search_scope=search_scope,
                                       index_lookup=index_lookup)
        return link_collector
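
A minimal, hypothetical driver for create(); the option attribute names are the ones read by the snippet above, while the bare PipSession() construction is an assumption:

import optparse

options = optparse.Values()
options.index_url = "https://bob:secret@pypi.example.com/simple"
options.extra_index_urls = []
options.no_index = False
options.find_links = []

collector = LinkCollector.create(session=PipSession(), options=options)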
Example #7
def make_link_collector(
    session,  # type: PipSession
    options,  # type: Values
    suppress_no_index=False,  # type: bool
):
    # type: (...) -> LinkCollector
    """
    :param session: The Session to use to make requests.
    :param suppress_no_index: Whether to ignore the --no-index option
        when constructing the SearchScope object.
    """
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index and not suppress_no_index:
        logger.debug(
            'Ignoring indexes: %s',
            ','.join(redact_auth_from_url(url) for url in index_urls),
        )
        index_urls = []

    # Make sure find_links is a list before passing to create().
    find_links = options.find_links or []

    search_scope = SearchScope.create(
        find_links=find_links, index_urls=index_urls,
    )

    link_collector = LinkCollector(session=session, search_scope=search_scope)

    return link_collector
Example #8
 def __str__(self) -> str:
     if self.req:
         s = str(self.req)
         if self.link:
             s += " from {}".format(redact_auth_from_url(self.link.url))
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = "<InstallRequirement>"
     if self.satisfied_by is not None:
         s += " in {}".format(display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, str):
             comes_from: Optional[str] = self.comes_from
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += f" (from {comes_from})"
     return s
Example #9
 def __str__(self):
     # type: () -> str
     if self.req:
         s = str(self.req)
         if self.link:
             s += ' from %s' % redact_auth_from_url(self.link.url)
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = '<InstallRequirement>'
     if self.satisfied_by is not None:
         s += ' in %s' % display_path(self.satisfied_by.location)
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             comes_from = self.comes_from  # type: Optional[str]
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += ' (from %s)' % comes_from
     return s
Example #10
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
            # twine upload && pip install will work, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. By setting this to zero we will not
            # blindly use any cached data. The benefit of
            # max-age=0 over no-cache is that we still support
            # conditional requests, so we still minimize traffic
            # sent in cases where the page hasn't changed at all;
            # we just always incur the round trip for the
            # conditional GET now instead of only once per 10
            # minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    resp.raise_for_status()

    # The check for archives above only works if the URL ends with
    # something that looks like an archive. However, that is not a
    # requirement of a URL. Unless we issue a HEAD request on every
    # URL, we cannot know ahead of time for sure if something is HTML
    # or not. However, we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp
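
The docstring above leans on _ensure_html_header; a plausible sketch of that check, written in the same legacy type-comment style (the real helper in pip may differ in detail):

class _NotHTML(Exception):
    def __init__(self, content_type, request_desc):
        # type: (str, str) -> None
        super(_NotHTML, self).__init__(content_type, request_desc)
        self.content_type = content_type
        self.request_desc = request_desc


def _ensure_html_header(response):
    # type: (Response) -> None
    """Raise `_NotHTML` if the response does not declare an HTML Content-Type."""
    content_type = response.headers.get("Content-Type", "")
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)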
Example #11
    def expand_default(self, option: optparse.Option) -> str:
        default_values = None
        if self.parser is not None:
            assert isinstance(self.parser, ConfigOptionParser)
            self.parser._update_defaults(self.parser.defaults)
            assert option.dest is not None
            default_values = self.parser.defaults.get(option.dest)
        help_text = super().expand_default(option)

        if default_values and option.metavar == "URL":
            if isinstance(default_values, str):
                default_values = [default_values]

            # If it's not a list, bail out and leave the help text unmodified
            if not isinstance(default_values, list):
                default_values = []

            for val in default_values:
                help_text = help_text.replace(val, redact_auth_from_url(val))

        return help_text
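
To see the replace step in action, with a made-up default URL:

help_text = (
    "Base URL of the Python Package Index "
    "(default https://carol:hunter2@pypi.example.com/simple)"
)
val = "https://carol:hunter2@pypi.example.com/simple"
print(help_text.replace(val, redact_auth_from_url(val)))
# Base URL of the Python Package Index
# (default https://carol:****@pypi.example.com/simple)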
Example #12
def _prepare_download(
    resp: Response,
    link: Link,
    progress_bar: str,
) -> Iterable[bytes]:
    total_length = _get_http_response_size(resp)

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)

    if total_length:
        logged_url = "{} ({})".format(logged_url, format_size(total_length))

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif not total_length:
        show_progress = True
    elif total_length > (40 * 1000):
        show_progress = True
    else:
        show_progress = False

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)

    if not show_progress:
        return chunks

    renderer = get_download_progress_renderer(bar_type=progress_bar, size=total_length)
    return renderer(chunks)
Example #13
def _prepare_download(
        resp,  # type: Response
        link,  # type: Link
        progress_bar  # type: str
):
    # type: (...) -> Iterable[bytes]
    total_length = _get_http_response_size(resp)

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)

    if total_length:
        logged_url = '{} ({})'.format(logged_url, format_size(total_length))

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif not total_length:
        show_progress = True
    elif total_length > (40 * 1000):
        show_progress = True
    else:
        show_progress = False

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)

    if not show_progress:
        return chunks

    return DownloadProgressProvider(progress_bar, max=total_length)(chunks)
Example #14
 def __str__(self) -> str:
     return redact_auth_from_url(self.url)
 def __str__(self):
     # type: () -> str
     return redact_auth_from_url(self.url)
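
Taken together, the contract these examples rely on looks roughly like this (hypothetical REPL session, matching the sketch under Example #1):

>>> redact_auth_from_url("https://user:secret@pypi.example.com/simple")
'https://user:****@pypi.example.com/simple'
>>> redact_auth_from_url("https://token@pypi.example.com/simple")
'https://****@pypi.example.com/simple'
>>> redact_auth_from_url("https://pypi.org/simple")
'https://pypi.org/simple'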