Beispiel #1
0
 def __str__(self):
     """Render this link for display, redacting the URL's password."""
     # Optional "(requires-python:...)" suffix.
     rp = ''
     if self.requires_python:
         rp = ' (requires-python:%s)' % self.requires_python
     if not self.comes_from:
         return redact_password_from_url(str(self.url))
     return '%s (from %s)%s' % (
         redact_password_from_url(self.url), self.comes_from, rp,
     )
Beispiel #2
0
    def fetch_new(self, dest, url, rev_options):
        """Clone *url* into *dest* and check out the requested revision.

        ``rev_options`` carries the revision/branch the caller asked for;
        submodules are updated after the checkout.
        """
        logger.info(
            'Cloning %s%s to %s', redact_password_from_url(url),
            rev_options.to_display(), display_path(dest),
        )
        self.run_command(['clone', '-q', url, dest])

        if rev_options.rev:
            # A specific revision was requested; resolve it first.
            rev_options = self.resolve_revision(dest, url, rev_options)
            branch = getattr(rev_options, 'branch_name', None)
            if branch is not None:
                if self.get_branch(dest) != branch:
                    # The requested branch is not checked out yet.
                    args = [
                        'checkout', '-b', branch,
                        '--track', 'origin/{}'.format(branch),
                    ]
                    self.run_command(args, cwd=dest)
            elif not self.is_commit_id_equal(dest, rev_options.rev):
                # Only check out when HEAD is not already the commit.
                self.run_command(
                    ['checkout', '-q'] + rev_options.to_args(), cwd=dest,
                )

        # The repository may contain submodules.
        self.update_submodules(dest)
    def _build_package_finder(
        self,
        options,               # type: Values
        session,               # type: PipSession
        platform=None,         # type: Optional[str]
        python_versions=None,  # type: Optional[List[str]]
        abi=None,              # type: Optional[str]
        implementation=None    # type: Optional[str]
    ):
        # type: (...) -> PackageFinder
        """Create a package finder appropriate to this requirement command."""
        urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            # --no-index suppresses every configured index.
            redacted = [redact_password_from_url(u) for u in urls]
            logger.debug('Ignoring indexes: %s', ','.join(redacted))
            urls = []

        return PackageFinder(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=urls,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            session=session,
            platform=platform,
            versions=python_versions,
            abi=abi,
            implementation=implementation,
            prefer_binary=options.prefer_binary,
        )
Beispiel #4
0
 def __str__(self):
     """Human-readable requirement description; URL passwords redacted."""
     if self.req:
         text = str(self.req)
         if self.link:
             text += ' from %s' % redact_password_from_url(self.link.url)
     elif self.link:
         text = redact_password_from_url(self.link.url)
     else:
         text = '<InstallRequirement>'
     if self.satisfied_by is not None:
         text += ' in %s' % display_path(self.satisfied_by.location)
     if self.comes_from:
         # comes_from is either a plain string or an object with from_path().
         origin = (
             self.comes_from
             if isinstance(self.comes_from, six.string_types)
             else self.comes_from.from_path()
         )
         if origin:
             text += ' (from %s)' % origin
     return text
Beispiel #5
0
 def __str__(self):
     # type: () -> str
     """Describe this requirement for display; URL passwords are redacted."""
     if self.req:
         s = str(self.req)
         if self.link:
             s = s + ' from %s' % redact_password_from_url(self.link.url)
     else:
         # No parsed requirement: fall back to the link, then a placeholder.
         s = (redact_password_from_url(self.link.url) if self.link
              else '<InstallRequirement>')
     if self.satisfied_by is not None:
         s = s + ' in %s' % display_path(self.satisfied_by.location)
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             origin = self.comes_from
         else:
             origin = self.comes_from.from_path()
         if origin:
             s = s + ' (from %s)' % origin
     return s
 def __str__(self):
     # type: () -> str
     """Return a printable summary of this requirement."""
     if self.req:
         base = str(self.req)
         if self.link:
             base += " from %s" % redact_password_from_url(self.link.url)
     elif self.link:
         base = redact_password_from_url(self.link.url)
     else:
         base = "<InstallRequirement>"
     parts = [base]
     if self.satisfied_by is not None:
         parts.append(" in %s" % display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             origin = self.comes_from  # type: Optional[str]
         else:
             origin = self.comes_from.from_path()
         if origin:
             parts.append(" (from %s)" % origin)
     return "".join(parts)
Beispiel #7
0
 def get_formatted_locations(self):
     """Summarize configured indexes and find-links for logging."""
     lines = []
     # Skip the index line when only the default PyPI index is in use.
     if self.index_urls and self.index_urls != [PyPI.simple_url]:
         redacted = [redact_password_from_url(u) for u in self.index_urls]
         lines.append("Looking in indexes: {}".format(", ".join(redacted)))
     if self.find_links:
         lines.append(
             "Looking in links: {}".format(", ".join(self.find_links))
         )
     return "\n".join(lines)
Beispiel #8
0
Datei: index.py Projekt: pypa/pip
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Fetch *url* with GET and return the response, ensuring it is HTML.

    Three steps:

    1. When the URL looks like an archive, issue a HEAD request first so a
       large non-HTML file is not downloaded.  Raise `_NotHTTP` when the
       content type cannot be determined, `_NotHTML` when it is not HTML.
    2. Perform the GET; network failures surface as HTTP exceptions.
    3. Verify the Content-Type of the downloaded body is HTML, raising
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_password_from_url(url))

    # Cache-Control: max-age=0 (rather than no-cache) means cached data is
    # never used blindly -- so "twine upload && pip install" sees the new
    # release -- while conditional requests still work, keeping traffic for
    # unchanged pages to a single round trip.  See pypa/pip#5670.
    headers = {
        "Accept": "text/html",
        "Cache-Control": "max-age=0",
    }
    response = session.get(url, headers=headers)
    response.raise_for_status()

    # The archive check above only fires for archive-looking URLs; any other
    # URL can still serve non-HTML, so validate after downloading.
    _ensure_html_header(response)

    return response
Beispiel #9
0
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """GET *url* and return the response after verifying it is HTML.

    1. If the URL looks like an archive, send a HEAD request first so a
       large file is never downloaded just to discover it is not HTML;
       raises `_NotHTTP` or `_NotHTML` accordingly.
    2. Perform the GET itself; HTTP errors are raised on failure.
    3. Check the response's Content-Type and raise `_NotHTML` if the body
       is not HTML.
    """
    if _is_url_like_archive(url):
        # HEAD first so large archives are never downloaded by mistake.
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_password_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # max-age=0 instead of no-cache: cached data is never used
            # blindly (so twine upload && pip install behaves as authors
            # expect), yet conditional requests remain possible, so an
            # unchanged page costs only the conditional round trip.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    resp.raise_for_status()

    # Only archive-looking URLs were screened by the HEAD request above;
    # every other URL must be validated after the download completes.
    _ensure_html_header(resp)

    return resp
Beispiel #10
0
def make_search_scope(options, suppress_no_index=False):
    """Build a SearchScope from the parsed command-line options.

    :param suppress_no_index: Whether to ignore the --no-index option
        when constructing the SearchScope object.
    """
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index and not suppress_no_index:
        redacted = (redact_password_from_url(u) for u in index_urls)
        logger.debug('Ignoring indexes: %s', ','.join(redacted))
        index_urls = []

    return SearchScope(
        find_links=options.find_links,
        index_urls=index_urls,
    )
Beispiel #11
0
    def _build_package_finder(
            self,
            options,  # type: Values
            session,  # type: PipSession
            platform=None,  # type: Optional[str]
            py_version_info=None,  # type: Optional[Tuple[int, ...]]
            abi=None,  # type: Optional[str]
            implementation=None,  # type: Optional[str]
            ignore_requires_python=None,  # type: Optional[bool]
    ):
        # type: (...) -> PackageFinder
        """Create a package finder appropriate to this requirement command.

        :param ignore_requires_python: Whether to ignore incompatible
            "Requires-Python" values in links. Defaults to False.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            # --no-index disables all configured indexes.
            redacted = [redact_password_from_url(u) for u in index_urls]
            logger.debug('Ignoring indexes: %s', ','.join(redacted))
            index_urls = []

        # Bundle the interpreter-targeting knobs into a single object.
        target_python = TargetPython(
            platform=platform,
            py_version_info=py_version_info,
            abi=abi,
            implementation=implementation,
        )

        return PackageFinder.create(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=index_urls,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            session=session,
            target_python=target_python,
            prefer_binary=options.prefer_binary,
            ignore_requires_python=ignore_requires_python,
        )
Beispiel #12
0
def make_search_scope(options, suppress_no_index=False):
    # type: (Values, bool) -> SearchScope
    """Build a SearchScope from the parsed options.

    :param suppress_no_index: Whether to ignore the --no-index option
        when constructing the SearchScope object.
    """
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index and not suppress_no_index:
        logger.debug(
            'Ignoring indexes: %s',
            ','.join(map(redact_password_from_url, index_urls)),
        )
        index_urls = []

    # SearchScope.create() expects a list, so normalize a falsy value.
    return SearchScope.create(
        find_links=options.find_links or [],
        index_urls=index_urls,
    )
Beispiel #13
0
Datei: git.py Projekt: zqdely/pip
    def fetch_new(self, dest, url, rev_options):
        # type: (str, str, RevOptions) -> None
        """Clone *url* into *dest*, then check out the requested revision.

        Submodules are updated after the checkout.
        """
        logger.info(
            'Cloning %s%s to %s',
            redact_password_from_url(url),
            rev_options.to_display(),
            display_path(dest),
        )
        self.run_command(['clone', '-q', url, dest])

        if rev_options.rev:
            # A specific revision was requested; resolve it to something
            # git can check out.
            rev_options = self.resolve_revision(dest, url, rev_options)
            branch = getattr(rev_options, 'branch_name', None)
            if branch is not None:
                if self.get_current_branch(dest) != branch:
                    # The requested branch is not checked out yet.
                    args = [
                        'checkout', '-b', branch,
                        '--track', 'origin/{}'.format(branch),
                    ]
                    self.run_command(args, cwd=dest)
            elif not self.is_commit_id_equal(dest, rev_options.rev):
                # Only check out when HEAD is not already the commit.
                self.run_command(
                    ['checkout', '-q'] + rev_options.to_args(), cwd=dest,
                )

        # The repository may contain submodules.
        self.update_submodules(dest)
Beispiel #14
0
 def __str__(self):
     """Return this object's URL with its password redacted."""
     return redact_password_from_url(self.url)
Beispiel #15
0
def test_redact_password_from_url(auth_url, expected_url):
    """Parametrized check: redacting *auth_url* yields *expected_url*."""
    assert redact_password_from_url(auth_url) == expected_url
Beispiel #16
0
 def __str__(self):
     """Return this object's URL with its password redacted."""
     return redact_password_from_url(self.url)
Beispiel #17
0
def test_redact_password_from_url(auth_url, expected_url):
    """The redacted form of *auth_url* must equal *expected_url*."""
    redacted = redact_password_from_url(auth_url)
    assert redacted == expected_url
        self,
        find_links,  # type: List[str]
        index_urls,  # type: List[str]
    ):
        # type: (...) -> None
        self.find_links = find_links
        self.index_urls = index_urls

    def get_formatted_locations(self):
        # type: () -> str
        lines = []
<<<<<<< HEAD
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            lines.append(
                'Looking in indexes: {}'.format(', '.join(
                    redact_password_from_url(url) for url in self.index_urls))
            )
        if self.find_links:
            lines.append(
                'Looking in links: {}'.format(', '.join(
                    redact_password_from_url(url) for url in self.find_links))
=======
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:

                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib_parse.urlsplit(redacted_index_url)