def install_req_from_req_string(
        req_string,  # type: str
        comes_from=None,  # type: Optional[InstallRequirement]
        isolated=False,  # type: bool
        wheel_cache=None,  # type: Optional[WheelCache]
        use_pep517=None  # type: Optional[bool]
):
    # type: (...) -> InstallRequirement
    try:
        req = Requirement(req_string)
    except InvalidRequirement:
        raise InstallationError("Invalid requirement: '%s'" % req_string)

    domains_not_allowed = [
        PyPI.file_storage_domain,
        TestPyPI.file_storage_domain,
    ]
    if (req.url and comes_from and comes_from.link
            and comes_from.link.netloc in domains_not_allowed):
        # Explicitly disallow pypi packages that depend on external urls
        raise InstallationError(
            "Packages installed from PyPI cannot depend on packages "
            "which are not also hosted on PyPI.\n"
            "%s depends on %s " % (comes_from.name, req))

    return InstallRequirement(req,
                              comes_from,
                              isolated=isolated,
                              wheel_cache=wheel_cache,
                              use_pep517=use_pep517)
Example #2
    def run(self, options, args):
        with self._build_session(options) as session:
            reqs_to_uninstall = {}
            for name in args:
                req = install_req_from_line(
                    name,
                    isolated=options.isolated_mode,
                )
                if req.name:
                    reqs_to_uninstall[canonicalize_name(req.name)] = req
            for filename in options.requirements:
                for req in parse_requirements(filename,
                                              options=options,
                                              session=session):
                    if req.name:
                        reqs_to_uninstall[canonicalize_name(req.name)] = req
            if not reqs_to_uninstall:
                raise InstallationError(
                    'You must give at least one requirement to %(name)s (see '
                    '"ins help %(name)s")' % dict(name=self.name))

            protect_pip_from_modification_on_windows(
                modifying_pip="pip" in reqs_to_uninstall)

            for req in reqs_to_uninstall.values():
                uninstall_pathset = req.uninstall(
                    auto_confirm=options.yes,
                    verbose=self.verbosity > 0,
                )
                if uninstall_pathset:
                    uninstall_pathset.commit()
Example #3
    def prepare_editable_requirement(
            self,
            req,  # type: InstallRequirement
            require_hashes,  # type: bool
            use_user_site,  # type: bool
            finder  # type: PackageFinder
    ):
        # type: (...) -> DistAbstraction
        """Prepare an editable requirement
        """
        assert req.editable, "cannot prepare a non-editable req as editable"

        logger.info('Obtaining %s', req)

        with indent_log():
            if require_hashes:
                raise InstallationError(
                    'The editable requirement %s cannot be installed when '
                    'requiring hashes, because there is no single file to '
                    'hash.' % req)
            req.ensure_has_source_dir(self.src_dir)
            req.update_editable(not self._download_should_save)

            abstract_dist = make_abstract_dist(req)
            with self.req_tracker.track(req):
                abstract_dist.prep_for_dist(finder, self.build_isolation)

            if self._download_should_save:
                req.archive(self.download_dir)
            req.check_if_exists(use_user_site)

        return abstract_dist
Example #4
    def _raise_conflicts(self, conflicting_with, conflicting_reqs):
        conflict_messages = [
            '%s is incompatible with %s' % (installed, wanted)
            for installed, wanted in sorted(conflicting_reqs)
        ]
        raise InstallationError(
            "Some build dependencies for %s conflict with %s: %s." %
            (self.req, conflicting_with, ', '.join(conflict_messages)))
Example #5
def get_file_content(url, comes_from=None, session=None):
    # type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text]
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url:         File path or url.
    :param comes_from:  Origin description of requirements.
    :param session:     Instance of pip.download.PipSession.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                path = '/' + path.lstrip('/')
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text
    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
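
The two regular expressions used above are not shown in the snippet; the definitions below are assumptions based on what the code needs (match a leading scheme, and a drive letter written as "/C|" in old-style file: URLs). This standalone sketch reproduces only the file: branch, on Python 3.

import re
from urllib.parse import unquote

_scheme_re = re.compile(r'^(http|https|file):', re.I)   # assumed definition
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)  # assumed definition

def file_url_to_path(url):
    # Reproduces the file: branch of get_file_content(), for illustration only.
    match = _scheme_re.search(url)
    assert match and match.group(1).lower() == 'file'
    path = url.split(':', 1)[1]
    path = path.replace('\\', '/')
    drive = _url_slash_drive_re.match(path)
    if drive:
        path = drive.group(1) + ':' + path.split('|', 1)[1]
    path = unquote(path)
    if path.startswith('/'):
        path = '/' + path.lstrip('/')
    return path

print(file_url_to_path('file:///tmp/requirements%20dev.txt'))  # /tmp/requirements dev.txt
print(file_url_to_path('file:///C|/reqs/base.txt'))            # C:/reqs/base.txt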
Example #6
    def _get_script_text(entry):
        if entry.suffix is None:
            raise InstallationError(
                "Invalid script entry point: %s for req: %s - A callable "
                "suffix is required. Cf https://packaging.python.org/en/"
                "latest/distributing.html#console-scripts for more "
                "information." % (entry, req))
        return maker.script_template % {
            "module": entry.prefix,
            "import_name": entry.suffix.split(".")[0],
            "func": entry.suffix,
        }
Example #7
    def _download_should_save(self):
        # type: () -> bool
        # TODO: Modify to reduce indentation needed
        if self.download_dir:
            self.download_dir = expanduser(self.download_dir)
            if os.path.exists(self.download_dir):
                return True
            else:
                logger.critical('Could not find download directory')
                raise InstallationError(
                    "Could not find or access download directory '%s'" %
                    display_path(self.download_dir))
        return False
Example #8
    def egg_info_path(self):
        # type: () -> str
        if self._egg_info_path is None:
            if self.editable:
                base = self.source_dir
            else:
                base = os.path.join(self.setup_py_dir, 'pip-egg-info')
            filenames = os.listdir(base)
            if self.editable:
                filenames = []
                for root, dirs, files in os.walk(base):
                    for dir in vcs.dirnames:
                        if dir in dirs:
                            dirs.remove(dir)
                    # Iterate over a copy of ``dirs``, since mutating
                    # a list while iterating over it can cause trouble.
                    # (See https://github.com/pypa/pip/pull/462.)
                    for dir in list(dirs):
                        # Don't search in anything that looks like a virtualenv
                        # environment
                        if (
                                os.path.lexists(
                                    os.path.join(root, dir, 'bin', 'python')
                                ) or
                                os.path.exists(
                                    os.path.join(
                                        root, dir, 'Scripts', 'Python.exe'
                                    )
                                )):
                            dirs.remove(dir)
                        # Also don't search through tests
                        elif dir == 'test' or dir == 'tests':
                            dirs.remove(dir)
                    filenames.extend([os.path.join(root, dir)
                                      for dir in dirs])
                filenames = [f for f in filenames if f.endswith('.egg-info')]

            if not filenames:
                raise InstallationError(
                    "Files/directories not found in %s" % base
                )
            # if we have more than one match, we pick the toplevel one.  This
            # can easily be the case if there is a dist folder which contains
            # an extracted tarball for testing purposes.
            if len(filenames) > 1:
                filenames.sort(
                    key=lambda x: x.count(os.path.sep) +
                    (os.path.altsep and x.count(os.path.altsep) or 0)
                )
            self._egg_info_path = os.path.join(base, filenames[0])
        return self._egg_info_path
Example #9
    def _correct_build_location(self):
        # type: () -> None
        """Move self._temp_build_dir to self._ideal_build_dir/self.req.name

        For some requirements (e.g. a path to a directory), the name of the
        package is not available until we run egg_info, so the build_location
        will return a temporary directory and store the _ideal_build_dir.

        This is only called by self.run_egg_info to fix the temporary build
        directory.
        """
        if self.source_dir is not None:
            return
        assert self.req is not None
        assert self._temp_build_dir.path
        assert (self._ideal_build_dir is not None and
                self._ideal_build_dir.path)  # type: ignore
        old_location = self._temp_build_dir.path
        self._temp_build_dir.path = None

        new_location = self.build_location(self._ideal_build_dir)
        if os.path.exists(new_location):
            raise InstallationError(
                'A package already exists in %s; please remove it to continue'
                % display_path(new_location))
        logger.debug(
            'Moving package %s from %s to new location %s',
            self, display_path(old_location), display_path(new_location),
        )
        shutil.move(old_location, new_location)
        self._temp_build_dir.path = new_location
        self._ideal_build_dir = None
        self.source_dir = os.path.normpath(os.path.abspath(new_location))
        self._egg_info_path = None

        # Correct the metadata directory, if it exists
        if self.metadata_directory:
            old_meta = self.metadata_directory
            rel = os.path.relpath(old_meta, start=old_location)
            new_meta = os.path.join(new_location, rel)
            new_meta = os.path.normpath(os.path.abspath(new_meta))
            self.metadata_directory = new_meta
Example #10
    def check_if_exists(self, use_user_site):
        # type: (bool) -> bool
        """Find an installed distribution that satisfies or conflicts
        with this requirement, and set self.satisfied_by or
        self.conflicts_with appropriately.
        """
        if self.req is None:
            return False
        try:
            # get_distribution() will resolve the entire list of requirements
            # anyway, and we've already determined that we need the requirement
            # in question, so strip the marker so that we don't try to
            # evaluate it.
            no_marker = Requirement(str(self.req))
            no_marker.marker = None
            self.satisfied_by = pkg_resources.get_distribution(str(no_marker))
            if self.editable and self.satisfied_by:
                self.conflicts_with = self.satisfied_by
                # when installing editables, nothing pre-existing should ever
                # satisfy
                self.satisfied_by = None
                return True
        except pkg_resources.DistributionNotFound:
            return False
        except pkg_resources.VersionConflict:
            existing_dist = pkg_resources.get_distribution(
                self.req.name
            )
            if use_user_site:
                if dist_in_usersite(existing_dist):
                    self.conflicts_with = existing_dist
                elif (running_under_virtualenv() and
                        dist_in_site_packages(existing_dist)):
                    raise InstallationError(
                        "Will not install to the user site because it will "
                        "lack sys.path precedence to %s in %s" %
                        (existing_dist.project_name, existing_dist.location)
                    )
            else:
                self.conflicts_with = existing_dist
        return True
Example #11
    def check_against_chunks(self, chunks):
        # type: (Iterator[bytes]) -> None
        """Check good hashes against ones built from iterable of chunks of
        data.

        Raise HashMismatch if none match.

        """
        gots = {}
        for hash_name in iterkeys(self._allowed):
            try:
                gots[hash_name] = hashlib.new(hash_name)
            except (ValueError, TypeError):
                raise InstallationError('Unknown hash name: %s' % hash_name)

        for chunk in chunks:
            for hash in itervalues(gots):
                hash.update(chunk)

        for hash_name, got in iteritems(gots):
            if got.hexdigest() in self._allowed[hash_name]:
                return
        self._raise(gots)
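
The same technique with nothing but hashlib: feed the data to every allowed algorithm in chunks, then accept if any digest matches. The names below are illustrative, not pip's.

import hashlib

def matches_any_hash(chunks, allowed):
    # allowed maps hash names (e.g. 'sha256') to lists of acceptable hex digests.
    gots = {name: hashlib.new(name) for name in allowed}
    for chunk in chunks:
        for h in gots.values():
            h.update(chunk)
    return any(got.hexdigest() in allowed[name] for name, got in gots.items())

chunks = [b"hello ", b"world"]
good = hashlib.sha256(b"hello world").hexdigest()
print(matches_any_hash(chunks, {"sha256": [good]}))      # True
print(matches_any_hash(chunks, {"sha256": ["0" * 64]}))  # False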
Example #12
def unpack_file(
    filename,  # type: str
    location,  # type: str
    content_type,  # type: Optional[str]
    link  # type: Optional[Link]
):
    # type: (...) -> None
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip' or
            filename.lower().endswith(ZIP_EXTENSIONS) or
            zipfile.is_zipfile(filename)):
        unzip_file(
            filename,
            location,
            flatten=not filename.endswith('.whl')
        )
    elif (content_type == 'application/x-gzip' or
            tarfile.is_tarfile(filename) or
            filename.lower().endswith(
                TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html') and
            is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip._internal.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        # FIXME: handle?
        # FIXME: magic signatures?
        logger.critical(
            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
            'cannot detect archive format',
            filename, location, content_type,
        )
        raise InstallationError(
            'Cannot determine archive format of %s' % location
        )
Example #13
def install_req_from_editable(
        editable_req,  # type: str
        comes_from=None,  # type: Optional[str]
        use_pep517=None,  # type: Optional[bool]
        isolated=False,  # type: bool
        options=None,  # type: Optional[Dict[str, Any]]
        wheel_cache=None,  # type: Optional[WheelCache]
        constraint=False  # type: bool
):
    # type: (...) -> InstallRequirement
    name, url, extras_override = parse_editable(editable_req)
    if url.startswith('file:'):
        source_dir = url_to_path(url)
    else:
        source_dir = None

    if name is not None:
        try:
            req = Requirement(name)
        except InvalidRequirement:
            raise InstallationError("Invalid requirement: '%s'" % name)
    else:
        req = None
    return InstallRequirement(
        req,
        comes_from,
        source_dir=source_dir,
        editable=True,
        link=Link(url),
        constraint=constraint,
        use_pep517=use_pep517,
        isolated=isolated,
        options=options if options else {},
        wheel_cache=wheel_cache,
        extras=extras_override or (),
    )
Example #14
    def prepare_linked_requirement(
            self,
            req,  # type: InstallRequirement
            session,  # type: PipSession
            finder,  # type: PackageFinder
            upgrade_allowed,  # type: bool
            require_hashes  # type: bool
    ):
        # type: (...) -> DistAbstraction
        """Prepare a requirement that would be obtained from req.link
        """
        # TODO: Breakup into smaller functions
        if req.link and req.link.scheme == 'file':
            path = url_to_path(req.link.url)
            logger.info('Processing %s', display_path(path))
        else:
            logger.info('Collecting %s', req)

        with indent_log():
            # @@ if filesystem packages are not marked
            # editable in a req, a non deterministic error
            # occurs when the script attempts to unpack the
            # build directory
            req.ensure_has_source_dir(self.build_dir)
            # If a checkout exists, it's unwise to keep going.  version
            # inconsistencies are logged later, but do not fail the
            # installation.
            # FIXME: this won't upgrade when there's an existing
            # package unpacked in `req.source_dir`
            if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
                raise PreviousBuildDirError(
                    "ins can't proceed with requirements '%s' due to a"
                    " pre-existing build directory (%s). This is "
                    "likely due to a previous installation that failed"
                    ". ins is being responsible and not assuming it "
                    "can delete this. Please delete it and try again." %
                    (req, req.source_dir))
            req.populate_link(finder, upgrade_allowed, require_hashes)

            # We can't hit this spot and have populate_link return None.
            # req.satisfied_by is None here (because we're
            # guarded) and upgrade has no impact except when satisfied_by
            # is not None.
            # Then inside find_requirement existing_applicable -> False
            # If no new versions are found, DistributionNotFound is raised,
            # otherwise a result is guaranteed.
            assert req.link
            link = req.link

            # Now that we have the real link, we can tell what kind of
            # requirements we have and raise some more informative errors
            # than otherwise. (For example, we can raise VcsHashUnsupported
            # for a VCS URL rather than HashMissing.)
            if require_hashes:
                # We could check these first 2 conditions inside
                # unpack_url and save repetition of conditions, but then
                # we would report less-useful error messages for
                # unhashable requirements, complaining that there's no
                # hash provided.
                if is_vcs_url(link):
                    raise VcsHashUnsupported()
                elif is_file_url(link) and is_dir_url(link):
                    raise DirectoryUrlHashUnsupported()
                if not req.original_link and not req.is_pinned:
                    # Unpinned packages are asking for trouble when a new
                    # version is uploaded. This isn't a security check, but
                    # it saves users a surprising hash mismatch in the
                    # future.
                    #
                    # file:/// URLs aren't pinnable, so don't complain
                    # about them not being pinned.
                    raise HashUnpinned()

            hashes = req.hashes(trust_internet=not require_hashes)
            if require_hashes and not hashes:
                # Known-good hashes are missing for this requirement, so
                # shim it with a facade object that will provoke hash
                # computation and then raise a HashMissing exception
                # showing the user what the hash should be.
                hashes = MissingHashes()

            try:
                download_dir = self.download_dir
                # We always delete unpacked sdists after pip ran.
                autodelete_unpacked = True
                if req.link.is_wheel and self.wheel_download_dir:
                    # when doing 'pip wheel' we download wheels to a
                    # dedicated dir.
                    download_dir = self.wheel_download_dir
                if req.link.is_wheel:
                    if download_dir:
                        # When downloading, we only unpack wheels to get
                        # metadata.
                        autodelete_unpacked = True
                    else:
                        # When installing a wheel, we use the unpacked
                        # wheel.
                        autodelete_unpacked = False
                unpack_url(req.link,
                           req.source_dir,
                           download_dir,
                           autodelete_unpacked,
                           session=session,
                           hashes=hashes,
                           progress_bar=self.progress_bar)
            except requests.HTTPError as exc:
                logger.critical(
                    'Could not install requirement %s because of error %s',
                    req,
                    exc,
                )
                raise InstallationError(
                    'Could not install requirement %s because of HTTP '
                    'error %s for URL %s' % (req, exc, req.link))
            abstract_dist = make_abstract_dist(req)
            with self.req_tracker.track(req):
                abstract_dist.prep_for_dist(finder, self.build_isolation)
            if self._download_should_save:
                # Make a .zip of the source_dir we already created.
                if not req.link.is_artifact:
                    req.archive(self.download_dir)
        return abstract_dist
Example #15
def load_pyproject_toml(
        use_pep517,  # type: Optional[bool]
        pyproject_toml,  # type: str
        setup_py,  # type: str
        req_name  # type: str
):
    # type: (...) -> Optional[Tuple[List[str], str, List[str]]]
    """Load the pyproject.toml file.

    Parameters:
        use_pep517 - Has the user requested PEP 517 processing? None
                     means the user hasn't explicitly specified.
        pyproject_toml - Location of the project's pyproject.toml file
        setup_py - Location of the project's setup.py file
        req_name - The name of the requirement we're processing (for
                   error reporting)

    Returns:
        None if we should use the legacy code path, otherwise a tuple
        (
            requirements from pyproject.toml,
            name of PEP 517 backend,
            requirements we should check are installed after setting
                up the build environment
        )
    """
    has_pyproject = os.path.isfile(pyproject_toml)
    has_setup = os.path.isfile(setup_py)

    if has_pyproject:
        with io.open(pyproject_toml, encoding="utf-8") as f:
            pp_toml = pytoml.load(f)
        build_system = pp_toml.get("build-system")
    else:
        build_system = None

    # The following cases must use PEP 517
    # We check for use_pep517 being non-None and falsey because that means
    # the user explicitly requested --no-use-pep517.  The value 0 as
    # opposed to False can occur when the value is provided via an
    # environment variable or config file option (due to the quirk of
    # strtobool() returning an integer in pip's configuration code).
    if has_pyproject and not has_setup:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError("Disabling PEP 517 processing is invalid: "
                                    "project does not have a setup.py")
        use_pep517 = True
    elif build_system and "build-backend" in build_system:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError("Disabling PEP 517 processing is invalid: "
                                    "project specifies a build backend of {} "
                                    "in pyproject.toml".format(
                                        build_system["build-backend"]))
        use_pep517 = True

    # If we haven't worked out whether to use PEP 517 yet,
    # and the user hasn't explicitly stated a preference,
    # we do so if the project has a pyproject.toml file.
    elif use_pep517 is None:
        use_pep517 = has_pyproject

    # At this point, we know whether we're going to use PEP 517.
    assert use_pep517 is not None

    # If we're using the legacy code path, there is nothing further
    # for us to do here.
    if not use_pep517:
        return None

    if build_system is None:
        # Either the user has a pyproject.toml with no build-system
        # section, or the user has no pyproject.toml, but has opted in
        # explicitly via --use-pep517.
        # In the absence of any explicit backend specification, we
        # assume the setuptools backend that most closely emulates the
        # traditional direct setup.py execution, and require wheel and
        # a version of setuptools that supports that backend.

        build_system = {
            "requires": ["setuptools>=40.8.0", "wheel"],
            "build-backend": "setuptools.build_meta:__legacy__",
        }

    # If we're using PEP 517, we have build system information (either
    # from pyproject.toml, or defaulted by the code above).
    # Note that at this point, we do not know if the user has actually
    # specified a backend, though.
    assert build_system is not None

    # Ensure that the build-system section in pyproject.toml conforms
    # to PEP 518.
    error_template = (
        "{package} has a pyproject.toml file that does not comply "
        "with PEP 518: {reason}")

    # Specifying the build-system table but not the requires key is invalid
    if "requires" not in build_system:
        raise InstallationError(
            error_template.format(
                package=req_name,
                reason=(
                    "it has a 'build-system' table but not "
                    "'build-system.requires' which is mandatory in the table"
                )))

    # Error out if requires is not a list of strings
    requires = build_system["requires"]
    if not _is_list_of_str(requires):
        raise InstallationError(
            error_template.format(
                package=req_name,
                reason="'build-system.requires' is not a list of strings.",
            ))

    backend = build_system.get("build-backend")
    check = []  # type: List[str]
    if backend is None:
        # If the user didn't specify a backend, we assume they want to use
        # the setuptools backend. But we can't be sure they have included
        # a version of setuptools which supplies the backend, or wheel
        # (which is needed by the backend) in their requirements. So we
        # make a note to check that those requirements are present once
        # we have set up the environment.
        # This is quite a lot of work to check for a very specific case. But
        # the problem is, that case is potentially quite common - projects that
        # adopted PEP 518 early for the ability to specify requirements to
        # execute setup.py, but never considered needing to mention the build
        # tools themselves. The original PEP 518 code had a similar check (but
        # implemented in a different way).
        backend = "setuptools.build_meta:__legacy__"
        check = ["setuptools>=40.8.0", "wheel"]

    return (requires, backend, check)
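
A hedged usage sketch of the return value described in the docstring: build a throwaway project that has only a pyproject.toml, and ask for the PEP 517 data. The import path is an assumption (in pip 19.x the function lives in pip._internal.pyproject).

import os
import tempfile

from pip._internal.pyproject import load_pyproject_toml  # assumed location

project = tempfile.mkdtemp()
with open(os.path.join(project, "pyproject.toml"), "w") as f:
    f.write('[build-system]\nrequires = ["setuptools", "wheel"]\n')

requires, backend, check = load_pyproject_toml(
    use_pep517=None,
    pyproject_toml=os.path.join(project, "pyproject.toml"),
    setup_py=os.path.join(project, "setup.py"),  # deliberately absent
    req_name="demo",
)
print(requires)  # ['setuptools', 'wheel']
print(backend)   # 'setuptools.build_meta:__legacy__' (no build-backend given)
print(check)     # ['setuptools>=40.8.0', 'wheel']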
Example #16
    def run(self, options, args):
        cmdoptions.check_install_build_global(options)
        upgrade_strategy = "to-satisfy-only"
        if options.upgrade:
            upgrade_strategy = options.upgrade_strategy

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        cmdoptions.check_dist_restriction(options, check_target=True)

        if options.python_version:
            python_versions = [options.python_version]
        else:
            python_versions = None

        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')

        target_temp_dir = TempDirectory(kind="target")
        if options.target_dir:
            options.ignore_installed = True
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )

            # Create a target directory for using with the target option
            target_temp_dir.create()
            install_options.append('--home=' + target_temp_dir.path)

        global_options = options.global_options or []

        with self._build_session(options) as session:
            finder = self._build_package_finder(
                options=options,
                session=session,
                platform=options.platform,
                python_versions=python_versions,
                abi=options.abi,
                implementation=options.implementation,
            )
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)

            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing ins with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None

            with RequirementTracker() as req_tracker, TempDirectory(
                options.build_dir, delete=build_delete, kind="install"
            ) as directory:
                requirement_set = RequirementSet(
                    require_hashes=options.require_hashes,
                    check_supported_wheels=not options.target_dir,
                )

                try:
                    self.populate_requirement_set(
                        requirement_set, args, options, finder, session,
                        self.name, wheel_cache
                    )
                    preparer = RequirementPreparer(
                        build_dir=directory.path,
                        src_dir=options.src_dir,
                        download_dir=None,
                        wheel_download_dir=None,
                        progress_bar=options.progress_bar,
                        build_isolation=options.build_isolation,
                        req_tracker=req_tracker,
                    )

                    resolver = Resolver(
                        preparer=preparer,
                        finder=finder,
                        session=session,
                        wheel_cache=wheel_cache,
                        use_user_site=options.use_user_site,
                        upgrade_strategy=upgrade_strategy,
                        force_reinstall=options.force_reinstall,
                        ignore_dependencies=options.ignore_dependencies,
                        ignore_requires_python=options.ignore_requires_python,
                        ignore_installed=options.ignore_installed,
                        isolated=options.isolated_mode,
                        use_pep517=options.use_pep517
                    )
                    resolver.resolve(requirement_set)

                    protect_pip_from_modification_on_windows(
                        modifying_pip=requirement_set.has_requirement("pip")
                    )

                    # Consider legacy and PEP517-using requirements separately
                    legacy_requirements = []
                    pep517_requirements = []
                    for req in requirement_set.requirements.values():
                        if req.use_pep517:
                            pep517_requirements.append(req)
                        else:
                            legacy_requirements.append(req)

                    wheel_builder = WheelBuilder(
                        finder, preparer, wheel_cache,
                        build_options=[], global_options=[],
                    )

                    build_failures = build_wheels(
                        builder=wheel_builder,
                        pep517_requirements=pep517_requirements,
                        legacy_requirements=legacy_requirements,
                        session=session,
                    )

                    # If we're using PEP 517, we cannot do a direct install
                    # so we fail here.
                    if build_failures:
                        raise InstallationError(
                            "Could not build wheels for {} which use"
                            " PEP 517 and cannot be installed directly".format(
                                ", ".join(r.name for r in build_failures)))

                    to_install = resolver.get_installation_order(
                        requirement_set
                    )

                    # Consistency Checking of the package set we're installing.
                    should_warn_about_conflicts = (
                        not options.ignore_dependencies and
                        options.warn_about_conflicts
                    )
                    if should_warn_about_conflicts:
                        self._warn_about_conflicts(to_install)

                    # Don't warn about script install locations if
                    # --target has been specified
                    warn_script_location = options.warn_script_location
                    if options.target_dir:
                        warn_script_location = False

                    installed = install_given_reqs(
                        to_install,
                        install_options,
                        global_options,
                        root=options.root_path,
                        home=target_temp_dir.path,
                        prefix=options.prefix_path,
                        pycompile=options.compile,
                        warn_script_location=warn_script_location,
                        use_user_site=options.use_user_site,
                    )

                    lib_locations = get_lib_location_guesses(
                        user=options.use_user_site,
                        home=target_temp_dir.path,
                        root=options.root_path,
                        prefix=options.prefix_path,
                        isolated=options.isolated_mode,
                    )
                    working_set = pkg_resources.WorkingSet(lib_locations)

                    reqs = sorted(installed, key=operator.attrgetter('name'))
                    items = []
                    for req in reqs:
                        item = req.name
                        try:
                            installed_version = get_installed_version(
                                req.name, working_set=working_set
                            )
                            if installed_version:
                                item += '-' + installed_version
                        except Exception:
                            pass
                        items.append(item)
                    installed = ' '.join(items)
                    if installed:
                        logger.info('Successfully installed %s', installed)
                except EnvironmentError as error:
                    show_traceback = (self.verbosity >= 1)

                    message = create_env_error_message(
                        error, show_traceback, options.use_user_site,
                    )
                    logger.error(message, exc_info=show_traceback)

                    return ERROR
                except PreviousBuildDirError:
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()
                        wheel_cache.cleanup()

        if options.target_dir:
            self._handle_target_dir(
                options.target_dir, target_temp_dir, options.upgrade
            )
        return requirement_set
Example #17
    def add_requirement(
        self,
        install_req,  # type: InstallRequirement
        parent_req_name=None,  # type: Optional[str]
        extras_requested=None  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]  # noqa: E501
        """Add install_req as a requirement to install.

        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with dependency
            links that point outside the Requirements set. parent_req must
            already be added. Note that None implies that this is a user
            supplied requirement, vs an inferred one.
        :param extras_requested: an iterable of extras used to evaluate the
            environment markers.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        name = install_req.name

        # If the markers do not match, ignore this requirement.
        if not install_req.match_markers(extras_requested):
            logger.info(
                "Ignoring %s: markers '%s' don't match your environment",
                name, install_req.markers,
            )
            return [], None

        # If the wheel is not supported, raise an error.
        # Should check this after filtering out based on environment markers to
        # allow specifying different wheels based on the environment/OS, in a
        # single requirements file.
        if install_req.link and install_req.link.is_wheel:
            wheel = Wheel(install_req.link.filename)
            if self.check_supported_wheels and not wheel.supported():
                raise InstallationError(
                    "%s is not a supported wheel on this platform." %
                    wheel.filename
                )

        # This next bit is really a sanity check.
        assert install_req.is_direct == (parent_req_name is None), (
            "a direct req shouldn't have a parent and also, "
            "a non direct req should have a parent"
        )

        # Unnamed requirements are scanned again and the requirement won't be
        # added as a dependency until after scanning.
        if not name:
            # url or path requirement w/o an egg fragment
            self.unnamed_requirements.append(install_req)
            return [install_req], None

        try:
            existing_req = self.get_requirement(name)
        except KeyError:
            existing_req = None

        has_conflicting_requirement = (
            parent_req_name is None and
            existing_req and
            not existing_req.constraint and
            existing_req.extras == install_req.extras and
            existing_req.req.specifier != install_req.req.specifier
        )
        if has_conflicting_requirement:
            raise InstallationError(
                "Double requirement given: %s (already in %s, name=%r)"
                % (install_req, existing_req, name)
            )

        # When no existing requirement exists, add the requirement as a
        # dependency and it will be scanned again after.
        if not existing_req:
            self.requirements[name] = install_req
            # FIXME: what about other normalizations?  E.g., _ vs. -?
            if name.lower() != name:
                self.requirement_aliases[name.lower()] = name
            # We'd want to rescan this requirement later
            return [install_req], install_req

        # Assume there's no need to scan, and that we've already
        # encountered this for scanning.
        if install_req.constraint or not existing_req.constraint:
            return [], existing_req

        does_not_satisfy_constraint = (
            install_req.link and
            not (
                existing_req.link and
                install_req.link.path == existing_req.link.path
            )
        )
        if does_not_satisfy_constraint:
            self.reqs_to_cleanup.append(install_req)
            raise InstallationError(
                "Could not satisfy constraints for '%s': "
                "installation from path or url cannot be "
                "constrained to a version" % name,
            )
        # If we're now installing a constraint, mark the existing
        # object for real installation.
        existing_req.constraint = False
        existing_req.extras = tuple(sorted(
            set(existing_req.extras) | set(install_req.extras)
        ))
        logger.debug(
            "Setting %s extras to: %s",
            existing_req, existing_req.extras,
        )
        # Return the existing requirement for addition to the parent and
        # scanning again.
        return [existing_req], existing_req
Example #18
def parse_editable(editable_req):
    # type: (str) -> Tuple[Optional[str], str, Optional[Set[str]]]
    """Parses an editable requirement into:
        - a requirement name
        - a URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]
    """

    url = editable_req

    # If a file path is specified with extras, strip off the extras.
    url_no_extras, extras = _strip_extras(url)

    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            msg = ('File "setup.py" not found. Directory cannot be installed '
                   'in editable mode: {}'.format(
                       os.path.abspath(url_no_extras)))
            pyproject_path = make_pyproject_path(url_no_extras)
            if os.path.isfile(pyproject_path):
                msg += ('\n(A "pyproject.toml" file was found, but editable '
                        'mode currently requires a setup.py based build.)')
            raise InstallationError(msg)

        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        package_name = Link(url_no_extras).egg_fragment
        if extras:
            return (
                package_name,
                url_no_extras,
                Requirement("placeholder" + extras.lower()).extras,
            )
        else:
            return package_name, url_no_extras, None

    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break

    if '+' not in url:
        raise InstallationError(
            '%s should either be a path to a local project or a VCS url '
            'beginning with svn+, git+, hg+, or bzr+' % editable_req)

    vc_type = url.split('+', 1)[0].lower()

    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)

    package_name = Link(url).egg_fragment
    if not package_name:
        raise InstallationError(
            "Could not detect requirement name for '%s', please specify one "
            "with #egg=your_package_name" % editable_req)
    return package_name, url, None
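
A hedged usage sketch for a VCS URL; the import path is an assumption (pip 19.x keeps parse_editable in pip._internal.req.constructors).

from pip._internal.req.constructors import parse_editable  # assumed location

name, url, extras = parse_editable(
    "git+https://github.com/pypa/sampleproject.git#egg=sampleproject"
)
print(name)    # 'sampleproject'
print(url)     # the VCS URL, unchanged, since it already carries the 'git+' prefix
print(extras)  # None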
Example #19
def install_req_from_line(
        name,  # type: str
        comes_from=None,  # type: Optional[Union[str, InstallRequirement]]
        use_pep517=None,  # type: Optional[bool]
        isolated=False,  # type: bool
        options=None,  # type: Optional[Dict[str, Any]]
        wheel_cache=None,  # type: Optional[WheelCache]
        constraint=False  # type: bool
):
    # type: (...) -> InstallRequirement
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.
    """
    if is_url(name):
        marker_sep = '; '
    else:
        marker_sep = ';'
    if marker_sep in name:
        name, markers_as_string = name.split(marker_sep, 1)
        markers_as_string = markers_as_string.strip()
        if not markers_as_string:
            markers = None
        else:
            markers = Marker(markers_as_string)
    else:
        markers = None
    name = name.strip()
    req_as_string = None
    path = os.path.normpath(os.path.abspath(name))
    link = None
    extras_as_string = None

    if is_url(name):
        link = Link(name)
    else:
        p, extras_as_string = _strip_extras(path)
        looks_like_dir = os.path.isdir(p) and (os.path.sep in name or
                                               (os.path.altsep is not None
                                                and os.path.altsep in name)
                                               or name.startswith('.'))
        if looks_like_dir:
            if not is_installable_dir(p):
                raise InstallationError(
                    "Directory %r is not installable. Neither 'setup.py' "
                    "nor 'pyproject.toml' found." % name)
            link = Link(path_to_url(p))
        elif is_archive_file(p):
            if not os.path.isfile(p):
                logger.warning(
                    'Requirement %r looks like a filename, but the '
                    'file does not exist', name)
            link = Link(path_to_url(p))

    # it's a local file, dir, or url
    if link:
        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', link.url):
            link = Link(
                path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            req_as_string = "%s==%s" % (wheel.name, wheel.version)
        else:
            # set the req to the egg fragment.  when it's not there, this
            # will become an 'unnamed' requirement
            req_as_string = link.egg_fragment

    # a requirement specifier
    else:
        req_as_string = name

    if extras_as_string:
        extras = Requirement("placeholder" + extras_as_string.lower()).extras
    else:
        extras = ()
    if req_as_string is not None:
        try:
            req = Requirement(req_as_string)
        except InvalidRequirement:
            if os.path.sep in req_as_string:
                add_msg = "It looks like a path."
                add_msg += deduce_helpful_msg(req_as_string)
            elif ('=' in req_as_string
                  and not any(op in req_as_string for op in operators)):
                add_msg = "= is not a valid operator. Did you mean == ?"
            else:
                add_msg = ""
            raise InstallationError("Invalid requirement: '%s'\n%s" %
                                    (req_as_string, add_msg))
    else:
        req = None

    return InstallRequirement(
        req,
        comes_from,
        link=link,
        markers=markers,
        use_pep517=use_pep517,
        isolated=isolated,
        options=options if options else {},
        wheel_cache=wheel_cache,
        constraint=constraint,
        extras=extras,
    )
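
A hedged usage sketch of the different input forms the constructor accepts; the import path is an assumption (pip 19.x keeps these constructors in pip._internal.req.constructors).

from pip._internal.req.constructors import install_req_from_line  # assumed location

# A plain specifier with an environment marker.
req = install_req_from_line("requests>=2.20; python_version >= '3'")
print(req.name)     # 'requests'
print(req.markers)  # python_version >= "3"

# A wheel filename: the name and version are read from the filename itself
# (a warning is logged if the file does not actually exist).
whl = install_req_from_line("requests-2.22.0-py2.py3-none-any.whl")
print(whl.req)      # requests==2.22.0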
Example #20
def call_subprocess(
    cmd,  # type: List[str]
    show_stdout=False,  # type: bool
    cwd=None,  # type: Optional[str]
    on_returncode='raise',  # type: str
    extra_ok_returncodes=None,  # type: Optional[Iterable[int]]
    command_desc=None,  # type: Optional[str]
    extra_environ=None,  # type: Optional[Mapping[str, Any]]
    unset_environ=None,  # type: Optional[Iterable[str]]
    spinner=None  # type: Optional[SpinnerInterface]
):
    # type: (...) -> Optional[Text]
    """
    Args:
      show_stdout: if true, use INFO to log the subprocess's stderr and
        stdout streams.  Otherwise, use DEBUG.  Defaults to False.
      extra_ok_returncodes: an iterable of integer return codes that are
        acceptable, in addition to 0. Defaults to None, which means [].
      unset_environ: an iterable of environment variable names to unset
        prior to calling subprocess.Popen().
    """
    if extra_ok_returncodes is None:
        extra_ok_returncodes = []
    if unset_environ is None:
        unset_environ = []
    # Most places in pip use show_stdout=False. What this means is--
    #
    # - We connect the child's output (combined stderr and stdout) to a
    #   single pipe, which we read.
    # - We log this output to stderr at DEBUG level as it is received.
    # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
    #   requested), then we show a spinner so the user can still see the
    #   subprocess is in progress.
    # - If the subprocess exits with an error, we log the output to stderr
    #   at ERROR level if it hasn't already been displayed to the console
    #   (e.g. if --verbose logging wasn't enabled).  This way we don't log
    #   the output to the console twice.
    #
    # If show_stdout=True, then the above is still done, but with DEBUG
    # replaced by INFO.
    if show_stdout:
        # Then log the subprocess output at INFO level.
        log_subprocess = subprocess_logger.info
        used_level = std_logging.INFO
    else:
        # Then log the subprocess output using DEBUG.  This also ensures
        # it will be logged to the log file (aka user_log), if enabled.
        log_subprocess = subprocess_logger.debug
        used_level = std_logging.DEBUG

    # Whether the subprocess will be visible in the console.
    showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level

    # Only use the spinner if we're not showing the subprocess output
    # and we have a spinner.
    use_spinner = not showing_subprocess and spinner is not None

    if command_desc is None:
        command_desc = format_command_args(cmd)

    log_subprocess("Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    for name in unset_environ:
        env.pop(name, None)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, cwd=cwd, env=env,
        )
        proc.stdin.close()
    except Exception as exc:
        subprocess_logger.critical(
            "Error %s while executing command %s", exc, command_desc,
        )
        raise
    all_output = []
    while True:
        line = console_to_str(proc.stdout.readline())
        if not line:
            break
        line = line.rstrip()
        all_output.append(line + '\n')

        # Show the line immediately.
        log_subprocess(line)
        # Update the spinner.
        if use_spinner:
            spinner.spin()
    try:
        proc.wait()
    finally:
        if proc.stdout:
            proc.stdout.close()
    proc_had_error = (
        proc.returncode and proc.returncode not in extra_ok_returncodes
    )
    if use_spinner:
        if proc_had_error:
            spinner.finish("error")
        else:
            spinner.finish("done")
    if proc_had_error:
        if on_returncode == 'raise':
            if not showing_subprocess:
                # Then the subprocess streams haven't been logged to the
                # console yet.
                subprocess_logger.error(
                    'Complete output from command %s:', command_desc,
                )
                # The all_output value already ends in a newline.
                subprocess_logger.error(''.join(all_output) + LOG_DIVIDER)
            raise InstallationError(
                'Command "%s" failed with error code %s in %s'
                % (command_desc, proc.returncode, cwd))
        elif on_returncode == 'warn':
            subprocess_logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc, proc.returncode, cwd,
            )
        elif on_returncode == 'ignore':
            pass
        else:
            raise ValueError('Invalid value: on_returncode=%s' %
                             repr(on_returncode))
    return ''.join(all_output)
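
A reduced, standalone sketch of the streaming loop above: run a command, echo each output line as it arrives, and fail on a non-zero exit code. Plain print() stands in for pip's subprocess logger and spinner.

import subprocess
import sys

def run_and_stream(cmd):
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    )
    proc.stdin.close()
    output = []
    for raw in iter(proc.stdout.readline, b''):
        line = raw.decode(sys.stdout.encoding or 'utf-8', 'replace').rstrip()
        output.append(line + '\n')
        print(line)  # shown immediately, like log_subprocess() above
    proc.stdout.close()
    proc.wait()
    if proc.returncode:
        raise RuntimeError('Command %r failed with error code %s'
                           % (cmd, proc.returncode))
    return ''.join(output)

run_and_stream([sys.executable, '-c', 'print("hello from a subprocess")'])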