Example #1
 def get_formatted_locations(self):
     # type: () -> str
     lines = []
     if self.index_urls and self.index_urls != [PyPI.simple_url]:
         lines.append(
             "Looking in indexes: {}".format(
                 ", ".join(redact_auth_from_url(url) for url in self.index_urls)
             )
         )
     if self.find_links:
         lines.append(
             "Looking in links: {}".format(
                 ", ".join(redact_auth_from_url(url) for url in self.find_links)
             )
         )
     return "\n".join(lines)
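For reference, a minimal sketch of what redact_auth_from_url produces. This assumes pip's documented behavior of masking credentials in the URL's netloc; the import path varies across pip versions and the sample URLs are illustrative:

 from pip._internal.utils.misc import redact_auth_from_url

 # A password is masked with "****"; the rest of the URL is untouched.
 print(redact_auth_from_url("https://user:secret@pypi.example.com/simple"))
 # https://user:****@pypi.example.com/simple

 # A username with no password is itself replaced.
 print(redact_auth_from_url("https://token@pypi.example.com/simple"))
 # https://****@pypi.example.com/simple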
Example #2
    def create(cls, session, options, suppress_no_index=False):
        # type: (PipSession, Values, bool) -> LinkCollector
        """
        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            logger.debug(
                'Ignoring indexes: %s',
                ','.join(redact_auth_from_url(url) for url in index_urls),
            )
            index_urls = []

        # Make sure find_links is a list before passing to create().
        find_links = options.find_links or []

        search_scope = SearchScope.create(
            find_links=find_links,
            index_urls=index_urls,
        )
        link_collector = LinkCollector(
            session=session,
            search_scope=search_scope,
        )
        return link_collector
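A hedged usage sketch for the classmethod above. The options object is normally built by pip's CLI; only the attributes create() actually reads are populated here, and the internal import paths shift between pip releases:

 from optparse import Values

 from pip._internal.index.collector import LinkCollector
 from pip._internal.network.session import PipSession

 # Illustrative values; a real options object carries many more attributes.
 options = Values({
     "index_url": "https://user:secret@pypi.example.com/simple",
     "extra_index_urls": [],
     "no_index": False,
     "find_links": [],
 })

 session = PipSession()
 link_collector = LinkCollector.create(session=session, options=options)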
Example #3
def _prepare_download(
    resp,  # type: Response
    link,  # type: Link
    progress_bar  # type: str
):
    # type: (...) -> Iterable[bytes]
    total_length = _get_http_response_size(resp)

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)

    if total_length:
        logged_url = '{} ({})'.format(logged_url, format_size(total_length))

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif not total_length:
        show_progress = True
    elif total_length > (40 * 1000):
        show_progress = True
    else:
        show_progress = False

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)

    if not show_progress:
        return chunks

    return DownloadProgressProvider(progress_bar, max=total_length)(chunks)
Example #4
    def get_formatted_locations(self):
        # type: () -> str
        lines = []
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:
                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib.parse.urlsplit(redacted_index_url)

                # URL is generally invalid if scheme and netloc is missing
                # there are issues with Python and URL parsing, so this test
                # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
                # always parse invalid URLs correctly - it should raise
                # exceptions for malformed URLs
                if not purl.scheme and not purl.netloc:
                    logger.warning(
                        'The index url "%s" seems invalid, '
                        'please provide a scheme.', redacted_index_url)

                redacted_index_urls.append(redacted_index_url)

            lines.append('Looking in indexes: {}'.format(
                ', '.join(redacted_index_urls)))

        if self.find_links:
            lines.append('Looking in links: {}'.format(', '.join(
                redact_auth_from_url(url) for url in self.find_links)))

        return '\n'.join(lines)
Example #5
def _download_url(
        resp,  # type: Response
        link,  # type: Link
        content_file,  # type: IO[Any]
        hashes,  # type: Optional[Hashes]
        progress_bar  # type: str
):
    # type: (...) -> None
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    redacted_url = redact_auth_from_url(url)

    if total_length:
        logger.info("Downloading %s (%s)", redacted_url,
                    format_size(total_length))
    elif is_from_cache(resp):
        logger.info("Using cached %s", redacted_url)
    else:
        logger.info("Downloading %s", redacted_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    def written_chunks(chunks):
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk

    progress_indicator = _progress_indicator

    if show_progress:  # We don't show progress on cached responses
        progress_indicator = DownloadProgressProvider(progress_bar,
                                                      max=total_length)

    downloaded_chunks = written_chunks(
        progress_indicator(response_chunks(resp, CONTENT_CHUNK_SIZE),
                           CONTENT_CHUNK_SIZE))
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        consume(downloaded_chunks)
Example #6
 def __str__(self) -> str:
     if self.req:
         s = str(self.req)
         if self.link:
             s += " from {}".format(redact_auth_from_url(self.link.url))
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = "<InstallRequirement>"
     if self.satisfied_by is not None:
         s += " in {}".format(display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, str):
             comes_from: Optional[str] = self.comes_from
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += f" (from {comes_from})"
     return s
Example #7
 def __str__(self):
     # type: () -> str
     if self.req:
         s = str(self.req)
         if self.link:
             s += ' from %s' % redact_auth_from_url(self.link.url)
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = '<InstallRequirement>'
     if self.satisfied_by is not None:
         s += ' in %s' % display_path(self.satisfied_by.location)
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             comes_from = self.comes_from  # type: Optional[str]
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += ' (from %s)' % comes_from
     return s
Example #8
File: link.py Project: wikieden/lumberyard
    def __str__(self):
        # type: () -> str
        if self.requires_python:
            rp = f' (requires-python:{self.requires_python})'
        else:
            rp = ''
        if self.comes_from:
            return '{} (from {}){}'.format(redact_auth_from_url(self._url),
                                           self.comes_from, rp)
        else:
            return redact_auth_from_url(str(self._url))
Example #9
 def __str__(self):
     # type: () -> str
     if self.req:
         s = str(self.req)
         if self.link:
             s += ' from {}'.format(redact_auth_from_url(self.link.url))
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = '<InstallRequirement>'
     if self.satisfied_by is not None:
         s += ' in {}'.format(display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             comes_from = self.comes_from  # type: Optional[str]
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += ' (from {})'.format(comes_from)
     return s
Example #10
 def __str__(self):
     # type: () -> str
     if self.req:
         s = str(self.req)
         if self.link:
             s += " from {}".format(redact_auth_from_url(self.link.url))
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = "<InstallRequirement>"
     if self.satisfied_by is not None:
         s += " in {}".format(display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, six.string_types):
             comes_from = self.comes_from  # type: Optional[str]
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += " (from {})".format(comes_from)
     return s
Example #11
 def __str__(self):
     # type: () -> str
     if self.req:
         s = str(self.req)
         if self.link:
             s += ' from {}'.format(redact_auth_from_url(self.link.url))
     elif self.link:
         s = redact_auth_from_url(self.link.url)
     else:
         s = '<InstallRequirement>'
     if self.satisfied_by is not None:
         s += ' in {}'.format(display_path(self.satisfied_by.location))
     if self.comes_from:
         if isinstance(self.comes_from, str):
             comes_from = self.comes_from  # type: Optional[str]
         else:
             comes_from = self.comes_from.from_path()
         if comes_from:
             s += f' (from {comes_from})'
     return s
Example #12
    def expand_default(self, option):
        default_value = None
        if self.parser is not None:
            self.parser._update_defaults(self.parser.defaults)
            default_value = self.parser.defaults.get(option.dest)
        help_text = optparse.IndentedHelpFormatter.expand_default(self, option)

        if default_value and option.metavar == 'URL':
            help_text = help_text.replace(default_value,
                                          redact_auth_from_url(default_value))

        return help_text
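In isolation, the replacement above behaves like this minimal sketch; the option default and help text are hypothetical values, not pip's real option metadata:

 from pip._internal.utils.misc import redact_auth_from_url

 # Hypothetical default containing credentials.
 default_value = "https://user:secret@index.example.com/simple"
 help_text = "Base URL of the Package Index (default {})".format(default_value)

 # The same substitution expand_default() applies when option.metavar == 'URL':
 help_text = help_text.replace(default_value, redact_auth_from_url(default_value))
 # -> "Base URL of the Package Index
 #     (default https://user:****@index.example.com/simple)"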
Example #13
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly returned cached data for
            # /simple/, because authors generally expecting that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            # However if we want to override Cache-Control, e.g. via CLI,
            # we can still do so.
            "Cache-Control": session.headers.get('Cache-Control', 'max-age=0'),
        },
    )
    resp.raise_for_status()

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is HTML
    # or not. However we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp
Example #14
    def __str__(self) -> str:
        versions = []
        pre_versions = []

        for candidate in sorted(self.candidates_tried):
            version = str(candidate.version)
            if candidate.version.is_prerelease:
                pre_versions.append(version)
            else:
                versions.append(version)

        lines = [f"Could not find a version that matches {self.ireq}"]

        if versions:
            lines.append(f"Tried: {', '.join(versions)}")

        if pre_versions:
            if self.finder.allow_all_prereleases:
                line = "Tried"
            else:
                line = "Skipped"

            line += f" pre-versions: {', '.join(pre_versions)}"
            lines.append(line)

        if versions or pre_versions:
            lines.append(
                "There are incompatible versions in the resolved dependencies:"
            )
            source_ireqs = getattr(self.ireq, "_source_ireqs", [])
            lines.extend(f"  {ireq}" for ireq in source_ireqs)
        else:
            redacted_urls = tuple(
                redact_auth_from_url(url) for url in self.finder.index_urls
            )
            lines.append("No versions found")
            lines.append(
                "{} {} reachable?".format(
                    "Were" if len(redacted_urls) > 1 else "Was",
                    " or ".join(redacted_urls),
                )
            )
        return "\n".join(lines)
Example #15
    def expand_default(self, option):
        default_values = None
        if self.parser is not None:
            self.parser._update_defaults(self.parser.defaults)
            default_values = self.parser.defaults.get(option.dest)
        help_text = super().expand_default(option)

        if default_values and option.metavar == "URL":
            if isinstance(default_values, str):
                default_values = [default_values]

            # If it's not a list, we should abort and just return the help text
            if not isinstance(default_values, list):
                default_values = []

            for val in default_values:
                help_text = help_text.replace(val, redact_auth_from_url(val))

        return help_text
Example #16
File: download.py Project: DiddiLeija/pip
def _prepare_download(
    resp: Response,
    link: Link,
    progress_bar: str,
) -> Iterable[bytes]:
    total_length = _get_http_response_size(resp)

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)

    if total_length:
        logged_url = "{} ({})".format(logged_url, format_size(total_length))

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif not total_length:
        show_progress = True
    elif total_length > (40 * 1000):
        show_progress = True
    else:
        show_progress = False

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)

    if not show_progress:
        return chunks

    renderer = get_download_progress_renderer(bar_type=progress_bar,
                                              size=total_length)
    return renderer(chunks)
Example #17
    def get_formatted_locations(self):
        # type: () -> str
        lines = []
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:
                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib_parse.urlsplit(redacted_index_url)

                # URL is generally invalid if scheme and netloc is missing
                # there are issues with Python and URL parsing, so this test
                # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
                # always parse invalid URLs correctly - it should raise
                # exceptions for malformed URLs
                if not purl.scheme and not purl.netloc:
                    logger.warning(
                        'The index url "{}" seems invalid, '
                        'please provide a scheme.'.format(redacted_index_url))
Example #18
def make_search_scope(options, suppress_no_index=False):
    # type: (Values, bool) -> SearchScope
    """
    :param suppress_no_index: Whether to ignore the --no-index option
        when constructing the SearchScope object.
    """
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index and not suppress_no_index:
        logger.debug(
            'Ignoring indexes: %s',
            ','.join(redact_auth_from_url(url) for url in index_urls),
        )
        index_urls = []

    # Make sure find_links is a list before passing to create().
    find_links = options.find_links or []

    search_scope = SearchScope.create(
        find_links=find_links,
        index_urls=index_urls,
    )

    return search_scope
Example #19
def cli(
    ctx: click.Context,
    verbose: int,
    quiet: int,
    dry_run: bool,
    pre: bool,
    rebuild: bool,
    find_links: Tuple[str],
    index_url: str,
    extra_index_url: Tuple[str],
    cert: Optional[str],
    client_cert: Optional[str],
    trusted_host: Tuple[str],
    header: bool,
    emit_trusted_host: bool,
    annotate: bool,
    upgrade: bool,
    upgrade_packages: Tuple[str],
    output_file: Optional[LazyFile],
    allow_unsafe: bool,
    generate_hashes: bool,
    reuse_hashes: bool,
    src_files: Tuple[str],
    max_rounds: int,
    build_isolation: bool,
    emit_find_links: bool,
    cache_dir: str,
    pip_args_str: Optional[str],
    emit_index_url: bool,
) -> None:
    """Compiles requirements.txt from requirements.in specs."""
    log.verbosity = verbose - quiet

    if len(src_files) == 0:
        if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
            src_files = (DEFAULT_REQUIREMENTS_FILE, )
        elif os.path.exists("setup.py"):
            src_files = ("setup.py", )
        else:
            raise click.BadParameter(("If you do not specify an input file, "
                                      "the default is {} or setup.py"
                                      ).format(DEFAULT_REQUIREMENTS_FILE))

    if not output_file:
        # An output file must be provided for stdin
        if src_files == ("-", ):
            raise click.BadParameter(
                "--output-file is required if input is from stdin")
        # Use the default requirements output file if the source file is a setup.py or metadata file
        elif os.path.basename(src_files[0]) in METADATA_FILENAMES:
            file_name = os.path.join(os.path.dirname(src_files[0]),
                                     DEFAULT_REQUIREMENTS_OUTPUT_FILE)
        # An output file must be provided if there are multiple source files
        elif len(src_files) > 1:
            raise click.BadParameter(
                "--output-file is required if two or more input files are given."
            )
        # Otherwise derive the output file from the source file
        else:
            base_name = src_files[0].rsplit(".", 1)[0]
            file_name = base_name + ".txt"

        output_file = click.open_file(file_name, "w+b", atomic=True, lazy=True)

        # Close the file at the end of the context execution
        assert output_file is not None
        ctx.call_on_close(safecall(output_file.close_intelligently))

    ###
    # Setup
    ###

    right_args = shlex.split(pip_args_str or "")
    pip_args = []
    for link in find_links:
        pip_args.extend(["-f", link])
    if index_url:
        pip_args.extend(["-i", index_url])
    for extra_index in extra_index_url:
        pip_args.extend(["--extra-index-url", extra_index])
    if cert:
        pip_args.extend(["--cert", cert])
    if client_cert:
        pip_args.extend(["--client-cert", client_cert])
    if pre:
        pip_args.extend(["--pre"])
    for host in trusted_host:
        pip_args.extend(["--trusted-host", host])

    if not build_isolation:
        pip_args.append("--no-build-isolation")
    pip_args.extend(right_args)

    repository: BaseRepository
    repository = PyPIRepository(pip_args, cache_dir=cache_dir)

    # Parse all constraints coming from --upgrade-package/-P
    upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)
    upgrade_install_reqs = {
        key_from_ireq(install_req): install_req
        for install_req in upgrade_reqs_gen
    }

    existing_pins_to_upgrade = set()

    # Proxy with a LocalRequirementsRepository if --upgrade is not specified
    # (= default invocation)
    if not upgrade and os.path.exists(output_file.name):
        # Use a temporary repository to ensure outdated(removed) options from
        # existing requirements.txt wouldn't get into the current repository.
        tmp_repository = PyPIRepository(pip_args, cache_dir=cache_dir)
        ireqs = parse_requirements(
            output_file.name,
            finder=tmp_repository.finder,
            session=tmp_repository.session,
            options=tmp_repository.options,
        )

        # Exclude packages from --upgrade-package/-P from the existing
        # constraints, and separately gather pins to be upgraded
        existing_pins = {}
        for ireq in filter(is_pinned_requirement, ireqs):
            key = key_from_ireq(ireq)
            if key in upgrade_install_reqs:
                existing_pins_to_upgrade.add(key)
            else:
                existing_pins[key] = ireq
        repository = LocalRequirementsRepository(existing_pins,
                                                 repository,
                                                 reuse_hashes=reuse_hashes)

    ###
    # Parsing/collecting initial requirements
    ###

    constraints = []
    for src_file in src_files:
        is_setup_file = os.path.basename(src_file) in METADATA_FILENAMES
        if src_file == "-":
            # pip requires filenames and not files. Since we want to support
            # piping from stdin, we need to briefly save the input from stdin
            # to a temporary file and have pip read that.  also used for
            # reading requirements from install_requires in setup.py.
            tmpfile = tempfile.NamedTemporaryFile(mode="wt", delete=False)
            tmpfile.write(sys.stdin.read())
            comes_from = "-r -"
            tmpfile.flush()
            reqs = list(
                parse_requirements(
                    tmpfile.name,
                    finder=repository.finder,
                    session=repository.session,
                    options=repository.options,
                ))
            for req in reqs:
                req.comes_from = comes_from
            constraints.extend(reqs)
        elif is_setup_file:
            dist = meta.load(os.path.dirname(os.path.abspath(src_file)))
            comes_from = f"{dist.metadata.get_all('Name')[0]} ({src_file})"
            constraints.extend([
                install_req_from_line(req, comes_from=comes_from)
                for req in dist.requires or []
            ])
        else:
            constraints.extend(
                parse_requirements(
                    src_file,
                    finder=repository.finder,
                    session=repository.session,
                    options=repository.options,
                ))

    primary_packages = {
        key_from_ireq(ireq)
        for ireq in constraints if not ireq.constraint
    }

    allowed_upgrades = primary_packages | existing_pins_to_upgrade
    constraints.extend(ireq for key, ireq in upgrade_install_reqs.items()
                       if key in allowed_upgrades)

    # Filter out pip environment markers which do not match (PEP496)
    constraints = [
        req for req in constraints if req.markers is None
        # We explicitly set extra=None to filter out optional requirements
        # since evaluating an extra marker with no environment raises UndefinedEnvironmentName
        # (see https://packaging.pypa.io/en/latest/markers.html#usage)
        or req.markers.evaluate({"extra": None})
    ]

    log.debug("Using indexes:")
    with log.indentation():
        for index_url in dedup(repository.finder.index_urls):
            log.debug(redact_auth_from_url(index_url))

    if repository.finder.find_links:
        log.debug("")
        log.debug("Using links:")
        with log.indentation():
            for find_link in dedup(repository.finder.find_links):
                log.debug(redact_auth_from_url(find_link))

    try:
        resolver = Resolver(
            constraints,
            repository,
            prereleases=repository.finder.allow_all_prereleases or pre,
            cache=DependencyCache(cache_dir),
            clear_caches=rebuild,
            allow_unsafe=allow_unsafe,
        )
        results = resolver.resolve(max_rounds=max_rounds)
        hashes = resolver.resolve_hashes(results) if generate_hashes else None
    except PipToolsError as e:
        log.error(str(e))
        sys.exit(2)

    log.debug("")

    ##
    # Output
    ##

    writer = OutputWriter(
        cast(BinaryIO, output_file),
        click_ctx=ctx,
        dry_run=dry_run,
        emit_header=header,
        emit_index_url=emit_index_url,
        emit_trusted_host=emit_trusted_host,
        annotate=annotate,
        generate_hashes=generate_hashes,
        default_index_url=repository.DEFAULT_INDEX_URL,
        index_urls=repository.finder.index_urls,
        trusted_hosts=repository.finder.trusted_hosts,
        format_control=repository.finder.format_control,
        allow_unsafe=allow_unsafe,
        find_links=repository.finder.find_links,
        emit_find_links=emit_find_links,
    )
    writer.write(
        results=results,
        unsafe_requirements=resolver.unsafe_constraints,
        markers={
            key_from_ireq(ireq): ireq.markers
            for ireq in constraints if ireq.markers
        },
        hashes=hashes,
    )

    if dry_run:
        log.info("Dry-run, so nothing updated.")
Example #20
def get_compile_command(click_ctx: click.Context) -> str:
    """
    Returns a normalized compile command depending on cli context.

    The command will be normalized by:
        - expanding options short to long
        - removing values that are already default
        - sorting the arguments
        - removing one-off arguments like '--upgrade'
        - removing arguments that don't change build behaviour like '--verbose'
    """
    from piptools.scripts.compile import cli

    # Map of the compile cli options (option name -> click.Option)
    compile_options = {option.name: option for option in cli.params}

    left_args = []
    right_args = []

    for option_name, value in click_ctx.params.items():
        option = compile_options[option_name]

        # Collect variadic args separately, they will be added
        # at the end of the command later
        if option.nargs < 0:
            # These will necessarily be src_files
            # Re-add click-stripped '--' if any start with '-'
            if any(val.startswith("-") and val != "-" for val in value):
                right_args.append("--")
            right_args.extend([shlex.quote(val) for val in value])
            continue

        assert isinstance(option, click.Option)

        # Get the latest option name (usually it'll be a long name)
        option_long_name = option.opts[-1]

        # Exclude one-off options (--upgrade/--upgrade-package/--rebuild/...)
        # or options that don't change compile behaviour (--verbose/--dry-run/...)
        if option_long_name in COMPILE_EXCLUDE_OPTIONS:
            continue

        # Skip options without a value
        if option.default is None and not value:
            continue

        # Skip options with a default value
        if option.default == value:
            continue

        # Use a file name for file-like objects
        if isinstance(value, LazyFile):
            value = value.name

        # Convert value to the list
        if not isinstance(value, (tuple, list)):
            value = [value]

        for val in value:
            # Flags don't have a value, thus add to args true or false option long name
            if option.is_flag:
                # If there are false-options, choose an option name depending on a value
                if option.secondary_opts:
                    # Get the latest false-option
                    secondary_option_long_name = option.secondary_opts[-1]
                    arg = option_long_name if val else secondary_option_long_name
                # There are no false-options, use true-option
                else:
                    arg = option_long_name
                left_args.append(shlex.quote(arg))
            # Append to args the option with a value
            else:
                if isinstance(val, str) and is_url(val):
                    val = redact_auth_from_url(val)
                if option.name == "pip_args_str":
                    # shlex.quote() would produce functional but noisily quoted results,
                    # e.g. --pip-args='--cache-dir='"'"'/tmp/with spaces'"'"''
                    # Instead, we try to get more legible quoting via repr:
                    left_args.append(f"{option_long_name}={repr(val)}")
                else:
                    left_args.append(
                        f"{option_long_name}={shlex.quote(str(val))}")

    return " ".join(["pip-compile", *sorted(left_args), *sorted(right_args)])
Example #21
 def __str__(self):
     # type: () -> str
     return redact_auth_from_url(self.url)
Example #22
File: test_utils.py Project: pradyunsg/pip
def test_redact_auth_from_url(auth_url: str, expected_url: str) -> None:
    url = redact_auth_from_url(auth_url)
    assert url == expected_url
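The test above relies on a pytest parametrization that is not shown. A sketch of plausible (auth_url, expected_url) pairs, offered as an illustration rather than pip's actual test data:

 import pytest

 from pip._internal.utils.misc import redact_auth_from_url

 @pytest.mark.parametrize(
     "auth_url, expected_url",
     [
         # A password is masked.
         ("https://user:pass@domain.tld/simple", "https://user:****@domain.tld/simple"),
         # A bare username is replaced entirely.
         ("https://user@domain.tld/simple", "https://****@domain.tld/simple"),
         # No credentials: the URL passes through unchanged.
         ("https://domain.tld/simple", "https://domain.tld/simple"),
     ],
 )
 def test_redact_auth_from_url(auth_url: str, expected_url: str) -> None:
     url = redact_auth_from_url(auth_url)
     assert url == expected_url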
Example #23
def cli(
    ctx,
    verbose,
    quiet,
    dry_run,
    pre,
    rebuild,
    find_links,
    index_url,
    extra_index_url,
    cert,
    client_cert,
    trusted_host,
    header,
    index,
    emit_trusted_host,
    annotate,
    upgrade,
    upgrade_packages,
    output_file,
    allow_unsafe,
    generate_hashes,
    reuse_hashes,
    src_files,
    max_rounds,
    build_isolation,
    emit_find_links,
    cache_dir,
    pip_args,
    emit_index_url,
):
    """Compiles requirements.txt from requirements.in specs."""
    log.verbosity = verbose - quiet

    if len(src_files) == 0:
        if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
            src_files = (DEFAULT_REQUIREMENTS_FILE, )
        elif os.path.exists("setup.py"):
            src_files = ("setup.py", )
        else:
            raise click.BadParameter(("If you do not specify an input file, "
                                      "the default is {} or setup.py"
                                      ).format(DEFAULT_REQUIREMENTS_FILE))

    if not output_file:
        # An output file must be provided for stdin
        if src_files == ("-", ):
            raise click.BadParameter(
                "--output-file is required if input is from stdin")
        # Use the default requirements output file if the source file is a setup.py
        elif src_files == ("setup.py", ):
            file_name = DEFAULT_REQUIREMENTS_OUTPUT_FILE
        # An output file must be provided if there are multiple source files
        elif len(src_files) > 1:
            raise click.BadParameter(
                "--output-file is required if two or more input files are given."
            )
        # Otherwise derive the output file from the source file
        else:
            base_name = src_files[0].rsplit(".", 1)[0]
            file_name = base_name + ".txt"

        output_file = click.open_file(file_name, "w+b", atomic=True, lazy=True)

        # Close the file at the end of the context execution
        ctx.call_on_close(safecall(output_file.close_intelligently))

    if cli.has_arg("index") and cli.has_arg("emit_index_url"):
        raise click.BadParameter(
            "--index/--no-index and --emit-index-url/--no-emit-index-url "
            "are mutually exclusive.")
    elif cli.has_arg("index"):
        warnings.warn(
            "--index and --no-index are deprecated and will be removed "
            "in future versions. Use --emit-index-url/--no-emit-index-url instead.",
            category=FutureWarning,
        )
        emit_index_url = index

    ###
    # Setup
    ###

    right_args = shlex.split(pip_args or "")
    pip_args = []
    for link in find_links:
        pip_args.extend(["-f", link])
    if index_url:
        pip_args.extend(["-i", index_url])
    for extra_index in extra_index_url:
        pip_args.extend(["--extra-index-url", extra_index])
    if cert:
        pip_args.extend(["--cert", cert])
    if client_cert:
        pip_args.extend(["--client-cert", client_cert])
    if pre:
        pip_args.extend(["--pre"])
    for host in trusted_host:
        pip_args.extend(["--trusted-host", host])

    if not build_isolation:
        pip_args.append("--no-build-isolation")
    pip_args.extend(right_args)

    repository = PyPIRepository(pip_args, cache_dir=cache_dir)

    # Parse all constraints coming from --upgrade-package/-P
    upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)
    upgrade_install_reqs = {
        key_from_ireq(install_req): install_req
        for install_req in upgrade_reqs_gen
    }

    existing_pins_to_upgrade = set()

    # Proxy with a LocalRequirementsRepository if --upgrade is not specified
    # (= default invocation)
    if not upgrade and os.path.exists(output_file.name):
        # Use a temporary repository to ensure outdated(removed) options from
        # existing requirements.txt wouldn't get into the current repository.
        tmp_repository = PyPIRepository(pip_args, cache_dir=cache_dir)
        ireqs = parse_requirements(
            output_file.name,
            finder=tmp_repository.finder,
            session=tmp_repository.session,
            options=tmp_repository.options,
        )

        # Exclude packages from --upgrade-package/-P from the existing
        # constraints, and separately gather pins to be upgraded
        existing_pins = {}
        for ireq in filter(is_pinned_requirement, ireqs):
            key = key_from_ireq(ireq)
            if key in upgrade_install_reqs:
                existing_pins_to_upgrade.add(key)
            else:
                existing_pins[key] = ireq
        repository = LocalRequirementsRepository(existing_pins,
                                                 repository,
                                                 reuse_hashes=reuse_hashes)

    ###
    # Parsing/collecting initial requirements
    ###

    constraints = []
    for src_file in src_files:
        is_setup_file = os.path.basename(src_file) == "setup.py"
        if is_setup_file or src_file == "-":
            # pip requires filenames and not files. Since we want to support
            # piping from stdin, we need to briefly save the input from stdin
            # to a temporary file and have pip read that.  also used for
            # reading requirements from install_requires in setup.py.
            tmpfile = tempfile.NamedTemporaryFile(mode="wt", delete=False)
            if is_setup_file:
                from distutils.core import run_setup

                dist = run_setup(src_file)
                tmpfile.write("\n".join(dist.install_requires))
                comes_from = f"{dist.get_name()} ({src_file})"
            else:
                tmpfile.write(sys.stdin.read())
                comes_from = "-r -"
            tmpfile.flush()
            reqs = list(
                parse_requirements(
                    tmpfile.name,
                    finder=repository.finder,
                    session=repository.session,
                    options=repository.options,
                ))
            for req in reqs:
                req.comes_from = comes_from
            constraints.extend(reqs)
        else:
            constraints.extend(
                parse_requirements(
                    src_file,
                    finder=repository.finder,
                    session=repository.session,
                    options=repository.options,
                ))

    primary_packages = {
        key_from_ireq(ireq)
        for ireq in constraints if not ireq.constraint
    }

    allowed_upgrades = primary_packages | existing_pins_to_upgrade
    constraints.extend(ireq for key, ireq in upgrade_install_reqs.items()
                       if key in allowed_upgrades)

    # Filter out pip environment markers which do not match (PEP496)
    constraints = [
        req for req in constraints
        if req.markers is None or req.markers.evaluate()
    ]

    log.debug("Using indexes:")
    with log.indentation():
        for index_url in dedup(repository.finder.index_urls):
            log.debug(redact_auth_from_url(index_url))

    if repository.finder.find_links:
        log.debug("")
        log.debug("Using links:")
        with log.indentation():
            for find_link in dedup(repository.finder.find_links):
                log.debug(redact_auth_from_url(find_link))

    try:
        resolver = Resolver(
            constraints,
            repository,
            prereleases=repository.finder.allow_all_prereleases or pre,
            cache=DependencyCache(cache_dir),
            clear_caches=rebuild,
            allow_unsafe=allow_unsafe,
        )
        results = resolver.resolve(max_rounds=max_rounds)
        if generate_hashes:
            hashes = resolver.resolve_hashes(results)
        else:
            hashes = None
    except PipToolsError as e:
        log.error(str(e))
        sys.exit(2)

    log.debug("")

    ##
    # Output
    ##

    writer = OutputWriter(
        output_file,
        click_ctx=ctx,
        dry_run=dry_run,
        emit_header=header,
        emit_index_url=emit_index_url,
        emit_trusted_host=emit_trusted_host,
        annotate=annotate,
        generate_hashes=generate_hashes,
        default_index_url=repository.DEFAULT_INDEX_URL,
        index_urls=repository.finder.index_urls,
        trusted_hosts=repository.finder.trusted_hosts,
        format_control=repository.finder.format_control,
        allow_unsafe=allow_unsafe,
        find_links=repository.finder.find_links,
        emit_find_links=emit_find_links,
    )
    writer.write(
        results=results,
        unsafe_requirements=resolver.unsafe_constraints,
        markers={
            key_from_ireq(ireq): ireq.markers
            for ireq in constraints if ireq.markers
        },
        hashes=hashes,
    )

    if dry_run:
        log.info("Dry-run, so nothing updated.")
Example #24
def test_redact_auth_from_url(auth_url, expected_url):
    url = redact_auth_from_url(auth_url)
    assert url == expected_url
Example #25
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:
                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib_parse.urlsplit(redacted_index_url)

                # URL is generally invalid if scheme and netloc is missing
                # there are issues with Python and URL parsing, so this test
                # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
                # always parse invalid URLs correctly - it should raise
                # exceptions for malformed URLs
                if not purl.scheme and not purl.netloc:
                    logger.warning(
                        'The index url "{}" seems invalid, '
                        'please provide a scheme.'.format(redacted_index_url))

                redacted_index_urls.append(redacted_index_url)
Example #26
 def __str__(self):
     return redact_auth_from_url(self.url)
Example #27
def _get_simple_response(url: str, session: PipSession) -> Response:
    """Access an Simple API response with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML or Simple API, to avoid downloading a
       large file. Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotAPIContent` if it is not HTML or a Simple API.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got a Simple API response,
       and raise `_NotAPIContent` otherwise.
    """
    if is_archive_file(Link(url).filename):
        _ensure_api_response(url, session=session)

    logger.debug("Getting page %s", redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept":
            ", ".join([
                "application/vnd.pypi.simple.v1+json",
                "application/vnd.pypi.simple.v1+html; q=0.1",
                "text/html; q=0.01",
            ]),
            # We don't want to blindly returned cached data for
            # /simple/, because authors generally expecting that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control":
            "max-age=0",
        },
    )
    raise_for_status(resp)

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is a
    # Simple API response or not. However we can check after we've
    # downloaded it.
    _ensure_api_header(resp)

    logger.debug(
        "Fetched page %s as %s",
        redact_auth_from_url(url),
        resp.headers.get("Content-Type", "Unknown"),
    )

    return resp
Example #28
    "-r",
    "--rebuild",
    is_flag=True,
    help="Clear any caches upfront, rebuild from scratch",
)
@click.option(
    "-f",
    "--find-links",
    multiple=True,
    help="Look for archives in this directory or on this HTML page",
)
@click.option(
    "-i",
    "--index-url",
    help="Change index URL (defaults to {index_url})".format(
        index_url=redact_auth_from_url(_get_default_option("index_url"))),
)
@click.option("--extra-index-url",
              multiple=True,
              help="Add additional index URL to search")
@click.option("--cert", help="Path to alternate CA bundle.")
@click.option(
    "--client-cert",
    help="Path to SSL client certificate, a single file containing "
    "the private key and the certificate in PEM format.",
)
@click.option(
    "--trusted-host",
    multiple=True,
    help="Mark this host as trusted, even though it does not have "
    "valid or any HTTPS.",
Example #29
                        'The index url "%s" seems invalid, '
                        'please provide a scheme.', redacted_index_url)

                redacted_index_urls.append(redacted_index_url)

            lines.append('Looking in indexes: {}'.format(
                ', '.join(redacted_index_urls)))

        if self.find_links:
            lines.append(
                'Looking in links: {}'.format(', '.join(
                    redact_auth_from_url(url) for url in self.find_links))
            )
        return '\n'.join(lines)

    def get_index_urls_locations(self, project_name):
        # type: (str) -> List[str]
        """Returns the locations found via self.index_urls

        Checks the url_name on the main (first in the list) index and
        use this url_name to produce all locations
        """

        def mkurl_pypi_url(url):
            # type: (str) -> str
            loc = posixpath.join(
                url,
Example #30

def _prepare_download(
    resp,  # type: Response
    link,  # type: Link
    progress_bar  # type: str
):
    # type: (...) -> Iterable[bytes]
    total_length = _get_http_response_size(resp)

    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)

    if total_length:
        logged_url = '{} ({})'.format(logged_url, format_size(total_length))

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif is_from_cache(resp):
        show_progress = False
    elif not total_length:
        show_progress = True