Example #1
    def run(self, options, args):
        with self._build_session(options) as session:
            reqs_to_uninstall = {}
            for name in args:
                req = install_req_from_line(
                    name,
                    isolated=options.isolated_mode,
                )
                if req.name:
                    reqs_to_uninstall[canonicalize_name(req.name)] = req
            for filename in options.requirements:
                for req in parse_requirements(filename,
                                              options=options,
                                              session=session):
                    if req.name:
                        reqs_to_uninstall[canonicalize_name(req.name)] = req
            if not reqs_to_uninstall:
                raise InstallationError(
                    'You must give at least one requirement to %(name)s (see '
                    '"ins help %(name)s")' % dict(name=self.name))

            protect_ins_from_modification_on_windows(
                modifying_ins="ins" in reqs_to_uninstall)

            for req in reqs_to_uninstall.values():
                uninstall_pathset = req.uninstall(
                    auto_confirm=options.yes,
                    verbose=self.verbosity > 0,
                )
                if uninstall_pathset:
                    uninstall_pathset.commit()
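
For illustration, a minimal sketch (assuming the packaging library) of why the map above is keyed by canonicalize_name: different spellings of one project collapse into a single uninstall entry.

from packaging.utils import canonicalize_name

# Different spellings of the same project collapse to one canonical
# key, so the map holds a single uninstall entry per project.
reqs_to_uninstall = {}
for spelling in ("Django", "django", "DJANGO"):
    reqs_to_uninstall[canonicalize_name(spelling)] = spelling
print(reqs_to_uninstall)  # {'django': 'DJANGO'}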
Example #2
def mkurl_pypi_url(url):
    loc = posixpath.join(
        url, urllib_parse.quote(canonicalize_name(project_name)))
    # For maximum compatibility with easy_install, ensure the path
    # ends in a trailing slash.  Although this isn't in the spec
    # (and PyPI can handle it without the slash) some other index
    # implementations might break if they relied on easy_install's
    # behavior.
    if not loc.endswith('/'):
        loc = loc + '/'
    return loc
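
A self-contained Python 3 variant of the same helper, as a sketch: it swaps the vendored urllib_parse for the stdlib urllib.parse and takes project_name as a parameter instead of reading it from the enclosing scope.

import posixpath
from urllib.parse import quote

from packaging.utils import canonicalize_name

def mkurl_pypi_url(url, project_name):
    loc = posixpath.join(url, quote(canonicalize_name(project_name)))
    # Trailing slash for easy_install compatibility, as explained above.
    if not loc.endswith('/'):
        loc += '/'
    return loc

print(mkurl_pypi_url('https://pypi.org/simple', 'Zope.Interface'))
# https://pypi.org/simple/zope-interface/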
Example #3
def should_use_ephemeral_cache(
        req,  # type: InstallRequirement
        format_control,  # type: FormatControl
        autobuilding,  # type: bool
        cache_available  # type: bool
):
    # type: (...) -> Optional[bool]
    """
    Return whether to build an InstallRequirement object using the
    ephemeral cache.

    :param cache_available: whether a cache directory is available for the
        autobuilding=True case.

    :return: True or False to build the requirement with ephem_cache=True
        or False, respectively; or None if the requirement should not be
        built at all.
    """
    if req.constraint:
        return None
    if req.is_wheel:
        if not autobuilding:
            logger.info(
                'Skipping %s, due to already being wheel.',
                req.name,
            )
        return None
    if not autobuilding:
        return False

    if req.editable or not req.source_dir:
        return None

    if req.link and not req.link.is_artifact:
        # VCS checkout. Build wheel just for this run.
        return True

    if "binary" not in format_control.get_allowed_formats(
            canonicalize_name(req.name)):
        logger.info(
            "Skipping bdist_wheel for %s, due to binaries "
            "being disabled for it.",
            req.name,
        )
        return None

    link = req.link
    base, ext = link.splitext()
    if cache_available and _contains_egg_info(base):
        return False

    # Otherwise, build the wheel just for this run using the ephemeral
    # cache since we are either in the case of e.g. a local directory, or
    # no cache directory is available to use.
    return True
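
A hypothetical caller would interpret the tri-state result like this (req and format_control are assumed to be in scope; the branch bodies are placeholders, not the actual build code):

ephem = should_use_ephemeral_cache(
    req, format_control, autobuilding=True, cache_available=True,
)
if ephem is None:
    pass  # do not build this requirement at all
elif ephem:
    pass  # build into the ephemeral (throwaway) cache for this run
else:
    pass  # build into the persistent wheel cache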
Example #4
    def prepare_metadata(self):
        # type: () -> None
        """Ensure that project metadata is available.

        Under PEP 517, call the backend hook to prepare the metadata.
        Under legacy processing, call setup.py egg-info.
        """
        assert self.source_dir

        with indent_log():
            if self.use_pep517:
                self.prepare_pep517_metadata()
            else:
                self.run_egg_info()

        if not self.req:
            if isinstance(parse_version(self.metadata["Version"]), Version):
                op = "=="
            else:
                op = "==="
            self.req = Requirement(
                "".join([
                    self.metadata["Name"],
                    op,
                    self.metadata["Version"],
                ])
            )
            self._correct_build_location()
        else:
            metadata_name = canonicalize_name(self.metadata["Name"])
            if canonicalize_name(self.req.name) != metadata_name:
                logger.warning(
                    'Generating metadata for package %s '
                    'produced metadata for project name %s. Fix your '
                    '#egg=%s fragments.',
                    self.name, metadata_name, self.name
                )
                self.req = Requirement(metadata_name)
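
The mismatch warning only fires when the two names still differ after canonicalization, so spelling variants pass silently. A quick check, assuming the packaging library:

from packaging.utils import canonicalize_name

# Case and separator variants normalize to the same canonical name ...
print(canonicalize_name("My_Package") == canonicalize_name("my-package"))  # True
# ... so only genuinely different names trigger the warning.
print(canonicalize_name("requests") == canonicalize_name("request"))  # False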
Example #5
def _create_whitelist(would_be_installed, package_set):
    # type: (Set[str], PackageSet) -> Set[str]
    packages_affected = set(would_be_installed)

    for package_name in package_set:
        if package_name in packages_affected:
            continue

        for req in package_set[package_name].requires:
            if canonicalize_name(req.name) in packages_affected:
                packages_affected.add(package_name)
                break

    return packages_affected
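
A tiny worked example; PackageDetails and Req here are hypothetical stand-ins for the library's internal types. Installing werkzeug also whitelists flask, because flask declares a requirement on it:

from collections import namedtuple

from packaging.utils import canonicalize_name

# Hypothetical stand-ins for the real package-set entry and requirement.
PackageDetails = namedtuple("PackageDetails", ["version", "requires"])
Req = namedtuple("Req", ["name"])

package_set = {
    "flask": PackageDetails("1.0", [Req("Werkzeug")]),
    "werkzeug": PackageDetails("0.15", []),
}
print(_create_whitelist({"werkzeug"}, package_set))
# {'flask', 'werkzeug'}  (set order may vary)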
Example #6
def check_package_set(package_set, should_ignore=None):
    # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult
    """Check if a package set is consistent

    If should_ignore is passed, it should be a callable that takes a
    package name and returns a boolean.
    """
    if should_ignore is None:

        def should_ignore(name):
            return False

    missing = dict()
    conflicting = dict()

    for package_name in package_set:
        # Info about dependencies of package_name
        missing_deps = set()  # type: Set[Missing]
        conflicting_deps = set()  # type: Set[Conflicting]

        if should_ignore(package_name):
            continue

        for req in package_set[package_name].requires:
            name = canonicalize_name(req.project_name)  # type: str

            # Check if it's missing
            if name not in package_set:
                missed = True
                if req.marker is not None:
                    missed = req.marker.evaluate()
                if missed:
                    missing_deps.add((name, req))
                continue

            # Check if there's a conflict
            version = package_set[name].version  # type: str
            if not req.specifier.contains(version, prereleases=True):
                conflicting_deps.add((name, version, req))

        if missing_deps:
            missing[package_name] = sorted(missing_deps, key=str)
        if conflicting_deps:
            conflicting[package_name] = sorted(conflicting_deps, key=str)

    return missing, conflicting
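
A small consistency check as a sketch; PackageDetails is again a hypothetical stand-in, while pkg_resources.Requirement supplies the project_name, specifier, and marker attributes the function reads:

import pkg_resources
from collections import namedtuple

PackageDetails = namedtuple("PackageDetails", ["version", "requires"])

package_set = {
    "flask": PackageDetails(
        "1.0", [pkg_resources.Requirement.parse("Werkzeug>=0.15")]),
    "werkzeug": PackageDetails("0.14", []),
}
missing, conflicting = check_package_set(package_set)
print(missing)      # {}
# flask's 'Werkzeug>=0.15' pin conflicts with the installed 0.14:
print(conflicting)  # {'flask': [('werkzeug', '0.14', ...)]}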
Example #7
def _simulate_installation_of(to_install, package_set):
    # type: (List[InstallRequirement], PackageSet) -> Set[str]
    """Computes the version of packages after installing to_install.
    """

    # Keep track of packages that were installed
    installed = set()

    # Modify it as installing requirement_set would (assuming no errors)
    for inst_req in to_install:
        dist = make_abstract_dist(inst_req).dist()
        name = canonicalize_name(dist.key)
        package_set[name] = PackageDetails(dist.version, dist.requires())

        installed.add(name)

    return installed
Example #8
    def _get_candidates(self, link, package_name):
        # type: (Link, Optional[str]) -> List[Any]
        can_not_cache = (not self.cache_dir or not package_name or not link)
        if can_not_cache:
            return []

        canonical_name = canonicalize_name(package_name)
        formats = self.format_control.get_allowed_formats(canonical_name)
        if not self.allowed_formats.intersection(formats):
            return []

        root = self.get_path_for_link(link)
        try:
            return os.listdir(root)
        except OSError as err:
            if err.errno in {errno.ENOENT, errno.ENOTDIR}:
                return []
            raise
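
The errno handling at the end is a reusable pattern: a missing or non-directory cache path simply means "no cached candidates", while any other OS error propagates. Isolated as a sketch:

import errno
import os

def listdir_or_empty(root):
    # Tolerate an absent cache directory; re-raise anything else.
    try:
        return os.listdir(root)
    except OSError as err:
        if err.errno in {errno.ENOENT, errno.ENOTDIR}:
            return []
        raise

print(listdir_or_empty('/no/such/cache/dir'))  # []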
Example #9
def handle_mutual_excludes(value, target, other):
    # type: (str, Optional[Set], Optional[Set]) -> None
    new = value.split(',')
    while ':all:' in new:
        other.clear()
        target.clear()
        target.add(':all:')
        del new[:new.index(':all:') + 1]
        # Without a ':none:' later in the list, we want to discard
        # everything else, as ':all:' covers it.
        if ':none:' not in new:
            return
    for name in new:
        if name == ':none:':
            target.clear()
            continue
        name = canonicalize_name(name)
        other.discard(name)
        target.add(name)
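
For example (a sketch exercising the function above): names move between the two sets in canonical spelling, so case and separators are irrelevant, and ':all:' wipes both sets before claiming the target.

only_binary = set()
no_binary = {'simplejson'}
handle_mutual_excludes('SimpleJSON', target=only_binary, other=no_binary)
print(only_binary, no_binary)  # {'simplejson'} set()

handle_mutual_excludes(':all:', target=only_binary, other=no_binary)
print(only_binary)  # {':all:'}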
Example #10
def create_package_set_from_installed(**kwargs):
    # type: (**Any) -> Tuple[PackageSet, bool]
    """Converts a list of distributions into a PackageSet.
    """
    # Default to using all packages installed on the system
    if kwargs == {}:
        kwargs = {"local_only": False, "skip": ()}

    package_set = {}
    problems = False
    for dist in get_installed_distributions(**kwargs):
        name = canonicalize_name(dist.project_name)
        try:
            package_set[name] = PackageDetails(dist.version, dist.requires())
        except RequirementParseError as e:
            # Don't crash on broken metadata
            logging.warning("Error parsing requirements for %s: %s", name, e)
            problems = True
    return package_set, problems
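
Typical usage, as a sketch (it assumes get_installed_distributions and the types above are importable in the current environment):

package_set, problems = create_package_set_from_installed(
    local_only=True, skip=())
for name in sorted(package_set):
    print(name, package_set[name].version)
if problems:
    print('some installed distributions had unparseable metadata')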
Example #11
def _find_name_version_sep(egg_info, canonical_name):
    # type: (str, str) -> int
    """Find the separator's index based on the package's canonical name.

    `egg_info` must be an egg info string for the given package, and
    `canonical_name` must be the package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

    >>> egg_info = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(egg_info, canonical_name)
    8
    """
    # Project name and version must be separated by one single dash. Find all
    # occurrences of dashes; if the string in front of it matches the canonical
    # name, this is the one separating the name and version parts.
    for i, c in enumerate(egg_info):
        if c != "-":
            continue
        if canonicalize_name(egg_info[:i]) == canonical_name:
            return i
    raise ValueError("{} does not match {}".format(egg_info, canonical_name))
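
Putting the separator index to work, as a sketch: once found, it splits an egg-info string into name and version even though the canonical name has a different length than the raw name part.

egg_info = 'zope.interface-4.5.0'
sep = _find_name_version_sep(egg_info, 'zope-interface')
name, version = egg_info[:sep], egg_info[sep + 1:]
print(name, version)  # zope.interface 4.5.0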
Example #12
    def clobber(source, dest, is_base, fixer=None, filter=None):
        ensure_dir(dest)  # common for the 'include' path

        for dir, subdirs, files in os.walk(source):
            basedir = dir[len(source):].lstrip(os.path.sep)
            destdir = os.path.join(dest, basedir)
            if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
                continue
            for s in subdirs:
                destsubdir = os.path.join(dest, basedir, s)
                if is_base and basedir == '' and destsubdir.endswith('.data'):
                    data_dirs.append(s)
                    continue
                elif (is_base and s.endswith('.dist-info')
                      and canonicalize_name(s).startswith(
                          canonicalize_name(req.name))):
                    assert not info_dir, ('Multiple .dist-info directories: ' +
                                          destsubdir + ', ' +
                                          ', '.join(info_dir))
                    info_dir.append(destsubdir)
            for f in files:
                # Skip unwanted files
                if filter and filter(f):
                    continue
                srcfile = os.path.join(dir, f)
                destfile = os.path.join(dest, basedir, f)
                # directory creation is lazy and after the file filtering above
                # to ensure we don't install empty dirs; empty dirs can't be
                # uninstalled.
                ensure_dir(destdir)

                # copyfile (called below) truncates the destination if it
                # exists and then writes the new contents. This is fine in most
                # cases, but can cause a segfault if ins has loaded a shared
                # object (e.g. from pyopenssl through its vendored urllib3)
                # Since the shared object is mmap'd an attempt to call a
                # symbol in it will then cause a segfault. Unlinking the file
                # allows writing of new contents while allowing the process to
                # continue to use the old copy.
                if os.path.exists(destfile):
                    os.unlink(destfile)

                # We use copyfile (not move, copy, or copy2) to be extra sure
                # that we are not moving directories over (copyfile fails for
                # directories) as well as to ensure that we are not copying
                # over any metadata because we want more control over what
                # metadata we actually copy over.
                shutil.copyfile(srcfile, destfile)

                # Copy over the metadata for the file, currently this only
                # includes the atime and mtime.
                st = os.stat(srcfile)
                if hasattr(os, "utime"):
                    os.utime(destfile, (st.st_atime, st.st_mtime))

                # If our file is executable, then make our destination file
                # executable.
                if os.access(srcfile, os.X_OK):
                    st = os.stat(srcfile)
                    permissions = (st.st_mode | stat.S_IXUSR | stat.S_IXGRP
                                   | stat.S_IXOTH)
                    os.chmod(destfile, permissions)

                changed = False
                if fixer:
                    changed = fixer(destfile)
                record_installed(srcfile, destfile, changed)
Example #13
    def _link_package_versions(self, link, search):
        # type: (Link, Search) -> Optional[InstallationCandidate]
        """Return an InstallationCandidate or None"""
        version = None
        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                self._log_skipped_link(link, 'not a file')
                return None
            if ext not in SUPPORTED_EXTENSIONS:
                self._log_skipped_link(
                    link,
                    'unsupported archive format: %s' % ext,
                )
                return None
            if "binary" not in search.formats and ext == WHEEL_EXTENSION:
                self._log_skipped_link(
                    link,
                    'No binaries permitted for %s' % search.supplied,
                )
                return None
            if "macosx10" in link.path and ext == '.zip':
                self._log_skipped_link(link, 'macosx10 one')
                return None
            if ext == WHEEL_EXTENSION:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    self._log_skipped_link(link, 'invalid wheel filename')
                    return None
                if canonicalize_name(wheel.name) != search.canonical:
                    self._log_skipped_link(
                        link, 'wrong project name (not %s)' % search.supplied)
                    return None

                if not self.candidate_evaluator.is_wheel_supported(wheel):
                    self._log_skipped_link(
                        link, 'it is not compatible with this Python')
                    return None

                version = wheel.version

        # This should be up by the search.ok_binary check, but see issue 2700.
        if "source" not in search.formats and ext != WHEEL_EXTENSION:
            self._log_skipped_link(
                link,
                'No sources permitted for %s' % search.supplied,
            )
            return None

        if not version:
            version = _egg_info_matches(egg_info, search.canonical)
        if not version:
            self._log_skipped_link(
                link, 'Missing project version for %s' % search.supplied)
            return None

        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                self._log_skipped_link(link, 'Python version is incorrect')
                return None
        try:
            support_this_python = check_requires_python(link.requires_python)
        except specifiers.InvalidSpecifier:
            logger.debug("Package %s has an invalid Requires-Python entry: %s",
                         link.filename, link.requires_python)
            support_this_python = True

        if not support_this_python:
            logger.debug(
                "The package %s is incompatible with the python "
                "version in use. Acceptable python versions are: %s", link,
                link.requires_python)
            return None
        logger.debug('Found link %s, version: %s', link, version)

        return InstallationCandidate(search.supplied, version, link)
Example #14
    def find_all_candidates(self, project_name):
        # type: (str) -> List[Optional[InstallationCandidate]]
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See _link_package_versions for details on which files are accepted
        """
        index_locations = self._get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = self._sort_locations(index_locations)
        fl_file_loc, fl_url_loc = self._sort_locations(
            self.find_links,
            expand_dir=True,
        )

        file_locations = (Link(url) for url in itertools.chain(
            index_file_loc,
            fl_file_loc,
        ))

        # We trust every url that the user has given us whether it was given
        #   via --index-url or --find-links.
        # We want to filter out any thing which does not have a secure origin.
        url_locations = [
            link for link in itertools.chain(
                (Link(url) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
            ) if self._validate_secure_origin(logger, link)
        ]

        logger.debug('%d location(s) to search for versions of %s:',
                     len(url_locations), project_name)

        for location in url_locations:
            logger.debug('* %s', location)

        canonical_name = canonicalize_name(project_name)
        formats = self.format_control.get_allowed_formats(canonical_name)
        search = Search(project_name, canonical_name, formats)
        find_links_versions = self._package_versions(
            # We trust every directly linked archive in find_links
            (Link(url, '-f') for url in self.find_links),
            search)

        page_versions = []
        for page in self._get_pages(url_locations, project_name):
            logger.debug('Analyzing links from page %s', page.url)
            with indent_log():
                page_versions.extend(
                    self._package_versions(page.iter_links(), search))

        file_versions = self._package_versions(file_locations, search)
        if file_versions:
            file_versions.sort(reverse=True)
            logger.debug(
                'Local files found: %s', ', '.join([
                    url_to_path(candidate.location.url)
                    for candidate in file_versions
                ]))

        # This is an intentional priority ordering
        return file_versions + find_links_versions + page_versions
Example #15
def freeze(
    requirement=None,  # type: Optional[List[str]]
    find_links=None,  # type: Optional[List[str]]
    local_only=None,  # type: Optional[bool]
    user_only=None,  # type: Optional[bool]
    skip_regex=None,  # type: Optional[str]
    isolated=False,  # type: bool
    wheel_cache=None,  # type: Optional[WheelCache]
    exclude_editable=False,  # type: bool
    skip=()  # type: Container[str]
):
    # type: (...) -> Iterator[str]
    find_links = find_links or []
    skip_match = None

    if skip_regex:
        skip_match = re.compile(skip_regex).search

    for link in find_links:
        yield '-f %s' % link
    installations = {}  # type: Dict[str, FrozenRequirement]
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=(),
                                            user_only=user_only):
        try:
            req = FrozenRequirement.from_dist(dist)
        except RequirementParseError:
            logger.warning(
                "Could not parse requirement: %s",
                dist.project_name
            )
            continue
        if exclude_editable and req.editable:
            continue
        installations[req.name] = req

    if requirement:
        # the options that don't get turned into an InstallRequirement
        # should only be emitted once, even if the same option is in multiple
        # requirements files, so we need to keep track of what has been emitted
        # so that we don't emit it again if it's seen again
        emitted_options = set()  # type: Set[str]
        # keep track of which files a requirement is in so that we can
        # give an accurate warning if a requirement appears multiple times.
        req_files = collections.defaultdict(list)  # type: Dict[str, List[str]]
        for req_file_path in requirement:
            with open(req_file_path) as req_file:
                for line in req_file:
                    if (not line.strip() or
                            line.strip().startswith('#') or
                            (skip_match and skip_match(line)) or
                            line.startswith((
                                '-r', '--requirement',
                                '-Z', '--always-unzip',
                                '-f', '--find-links',
                                '-i', '--index-url',
                                '--pre',
                                '--trusted-host',
                                '--process-dependency-links',
                                '--extra-index-url'))):
                        line = line.rstrip()
                        if line not in emitted_options:
                            emitted_options.add(line)
                            yield line
                        continue

                    if line.startswith('-e') or line.startswith('--editable'):
                        if line.startswith('-e'):
                            line = line[2:].strip()
                        else:
                            line = line[len('--editable'):].strip().lstrip('=')
                        line_req = install_req_from_editable(
                            line,
                            isolated=isolated,
                            wheel_cache=wheel_cache,
                        )
                    else:
                        line_req = install_req_from_line(
                            COMMENT_RE.sub('', line).strip(),
                            isolated=isolated,
                            wheel_cache=wheel_cache,
                        )

                    if not line_req.name:
                        logger.info(
                            "Skipping line in requirement file [%s] because "
                            "it's not clear what it would install: %s",
                            req_file_path, line.strip(),
                        )
                        logger.info(
                            "  (add #egg=PackageName to the URL to avoid"
                            " this warning)"
                        )
                    elif line_req.name not in installations:
                        # either it's not installed, or it is installed
                        # but has been processed already
                        if not req_files[line_req.name]:
                            logger.warning(
                                "Requirement file [%s] contains %s, but "
                                "package %r is not installed",
                                req_file_path,
                                COMMENT_RE.sub('', line).strip(), line_req.name
                            )
                        else:
                            req_files[line_req.name].append(req_file_path)
                    else:
                        yield str(installations[line_req.name]).rstrip()
                        del installations[line_req.name]
                        req_files[line_req.name].append(req_file_path)

        # Warn about requirements that were included multiple times (in a
        # single requirements file or in different requirements files).
        for name, files in six.iteritems(req_files):
            if len(files) > 1:
                logger.warning("Requirement %s included multiple times [%s]",
                               name, ', '.join(sorted(set(files))))

        yield(
            '## The following requirements were added by '
            'ins freeze:'
        )
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        if canonicalize_name(installation.name) not in skip:
            yield str(installation).rstrip()
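
A minimal driver, as a sketch; note the final comparison above uses canonicalize_name, so the names passed in skip should already be canonical:

# Emit a requirements listing for local packages, omitting build tools.
for line in freeze(local_only=True, skip={'setuptools', 'wheel'}):
    print(line)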
Example #16
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires an
    ins-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
    """
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadata
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                paths = [l.split(',')[0] for l in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use ins's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary', 'home-page', 'author',
                    'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package
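
Example invocation, as a sketch: the query names are canonicalized before lookup, so capitalization and separators in the query do not matter.

for package in search_packages_info(['Requests']):
    print(package['name'], package['version'])
    print(package['location'])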