Example #1
def version_satisfies_spec(spec, version):
    if not spec:
        return True
    if not version:
        return False
    if not isinstance(spec, SpecifierSet):
        spec = SpecifierSet(spec)
    if not isinstance(version, Version):
        version = Version(version)
    return spec.contains(version)
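A quick usage sketch of the helper above (the imports come from the packaging library, which pip vendors; the assertions are illustrative and not part of the original source):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

# Raw strings are parsed on the fly before the containment check.
assert version_satisfies_spec(">=1.0,<2.0", "1.4")
# Pre-parsed objects pass through the isinstance checks unchanged.
assert not version_satisfies_spec(SpecifierSet("<1.0"), Version("1.4"))
# An empty spec matches any version; an empty version matches nothing.
assert version_satisfies_spec(None, "1.4")
assert not version_satisfies_spec(">=1.0", None)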
Example #2
    def test_compute_best_candidate__none_best(self):
        """
        Test returning a None best candidate.
        """
        specifier = SpecifierSet('<= 1.10')
        versions = ['1.11', '1.12']
        candidates = [
            make_mock_candidate(version) for version in versions
        ]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        assert result._applicable_candidates == []
        assert result.best_candidate is None
Example #3
    def test_link_sorting(self) -> None:
        """
        Test link sorting
        """
        links = [
            InstallationCandidate("simple", "2.0", Link("simple-2.0.tar.gz")),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-pyT-none-TEST.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-pyT-TEST-any.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-pyT-none-any.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0.tar.gz"),
            ),
        ]
        valid_tags = [
            Tag("pyT", "none", "TEST"),
            Tag("pyT", "TEST", "any"),
            Tag("pyT", "none", "any"),
        ]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator(
            "my-project",
            supported_tags=valid_tags,
            specifier=specifier,
        )
        sort_key = evaluator._sort_key
        results = sorted(links, key=sort_key, reverse=True)
        results2 = sorted(reversed(links), key=sort_key, reverse=True)

        assert links == results, results
        assert links == results2, results2
Example #4
    def test_link_sorting(self):
        """
        Test link sorting
        """
        links = [
            InstallationCandidate("simple", "2.0", Link('simple-2.0.tar.gz')),
            InstallationCandidate(
                "simple",
                "1.0",
                Link('simple-1.0-pyT-none-TEST.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-pyT-TEST-any.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-pyT-none-any.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0.tar.gz'),
            ),
        ]
        valid_tags = [
            Tag('pyT', 'none', 'TEST'),
            Tag('pyT', 'TEST', 'any'),
            Tag('pyT', 'none', 'any'),
        ]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator(
            'my-project',
            supported_tags=valid_tags,
            specifier=specifier,
        )
        sort_key = evaluator._sort_key
        results = sorted(links, key=sort_key, reverse=True)
        results2 = sorted(reversed(links), key=sort_key, reverse=True)

        assert links == results, results
        assert links == results2, results2
Example #5
    def test_make_found_candidates(self):
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [
            self.make_mock_candidate(version) for version in versions
        ]
        evaluator = CandidateEvaluator()
        found_candidates = evaluator.make_found_candidates(
            candidates, specifier=specifier,
        )

        assert found_candidates._candidates == candidates
        assert found_candidates._evaluator is evaluator
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert found_candidates._applicable_candidates == expected_applicable
Example #6
def identify_requirement_conflicts(environment):
    packages = environment['packages']
    for pkg in packages.values():
        for key, version in pkg['requirements'].items():
            if key not in packages:
                # The required distribution is not installed at all.
                pkg['issues'].append({
                    'type': 'REQ_MISSING',
                    'key': key,
                    'requirement': version,
                })
            elif version and packages[key]['version'] not in SpecifierSet(version):
                # Installed, but the installed version violates the specifier.
                pkg['issues'].append({
                    'type': 'REQ_INVALID',
                    'key': key,
                    'requirement': version,
                    'installed': packages[key]['version'],
                })
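The function records problems in place on each package's issues list. A hypothetical environment shape that exercises both branches (this dict layout is an assumption for illustration, not taken from the original project):

environment = {
    'packages': {
        'flask': {
            'version': '1.0',
            'requirements': {'jinja2': '>=2.10', 'click': ''},
            'issues': [],
        },
        'jinja2': {
            'version': '2.9',  # violates flask's '>=2.10' -> REQ_INVALID
            'requirements': {},
            'issues': [],
        },
        # 'click' is not installed at all -> REQ_MISSING recorded on flask
    },
}
identify_requirement_conflicts(environment)
assert {i['type'] for i in environment['packages']['flask']['issues']} == {
    'REQ_INVALID',
    'REQ_MISSING',
}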
Example #7
    def test_compute_best_candidate(self):
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert result._applicable_candidates == expected_applicable

        assert result.best_candidate is expected_applicable[1]
Example #8
    def test_compute_best_candidate(self) -> None:
        specifier = SpecifierSet("<= 1.11")
        versions = ["1.10", "1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            "1.10",
            "1.11",
        ]
        assert result._applicable_candidates == expected_applicable

        assert result.best_candidate is expected_applicable[1]
Example #9
def add_requirements(user_reqs, env=None):
    """
    Create or update the setup.cfg file.

    Args:
        user_reqs (RequirementSet): the user's requirements
        env: optional environment/section name passed to update_config
    """
    config = _read_config()

    reqs = {}
    for req in user_reqs.requirements.values():  # type: InstallRequirement
        if not req.comes_from:  # add only top-level dependencies
            if not req.req.specifier and req.installed_version:
                req.req.specifier = SpecifierSet("~=" +
                                                 str(req.installed_version))
            reqs[req.req.name] = str(req.req)
    if reqs:
        update_config(config, env, reqs)
    _write_to_file(config)
    return config
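The "~=" operator used above pins each top-level requirement to a compatible release. A standalone illustration with packaging:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.4.2")
assert spec.contains("1.4.9")      # same 1.4.x series: allowed
assert not spec.contains("1.5.0")  # next minor release: rejected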
Example #10
    def test_build_tag_is_less_important_than_other_tags(self) -> None:
        links = [
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-1-py3-abi3-linux_x86_64.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-2-py3-abi3-linux_i386.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0-2-py3-any-none.whl"),
            ),
            InstallationCandidate(
                "simple",
                "1.0",
                Link("simple-1.0.tar.gz"),
            ),
        ]
        valid_tags = [
            Tag("py3", "abi3", "linux_x86_64"),
            Tag("py3", "abi3", "linux_i386"),
            Tag("py3", "any", "none"),
        ]
        evaluator = CandidateEvaluator(
            "my-project",
            supported_tags=valid_tags,
            specifier=SpecifierSet(),
        )
        sort_key = evaluator._sort_key
        results = sorted(links, key=sort_key, reverse=True)
        results2 = sorted(reversed(links), key=sort_key, reverse=True)

        assert links == results, results
        assert links == results2, results2
Example #11
    def test_build_tag_is_less_important_than_other_tags(self):
        links = [
            InstallationCandidate(
                "simple",
                "1.0",
                Link('simple-1.0-1-py3-abi3-linux_x86_64.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-2-py3-abi3-linux_i386.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-2-py3-any-none.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0.tar.gz'),
            ),
        ]
        valid_tags = [
            Tag('py3', 'abi3', 'linux_x86_64'),
            Tag('py3', 'abi3', 'linux_i386'),
            Tag('py3', 'any', 'none'),
        ]
        evaluator = CandidateEvaluator(
            'my-project',
            supported_tags=valid_tags,
            specifier=SpecifierSet(),
        )
        sort_key = evaluator._sort_key
        results = sorted(links, key=sort_key, reverse=True)
        results2 = sorted(reversed(links), key=sort_key, reverse=True)

        assert links == results, results
        assert links == results2, results2
Example #12
    def _get_requires_python_dependency(self):
        # type: () -> Optional[Requirement]
        requires_python = get_requires_python(self.dist)
        if requires_python is None:
            return None
        try:
            spec = SpecifierSet(requires_python)
        except InvalidSpecifier as e:
            message = "Package %r has an invalid Requires-Python: %s"
            logger.warning(message, self.name, e)
            return None
        return self._factory.make_requires_python_requirement(spec)
Example #13
    def test_make_candidate_evaluator(
        self,
        allow_all_prereleases: bool,
        prefer_binary: bool,
    ) -> None:
        target_python = TargetPython()
        target_python._valid_tags = [Tag("py36", "none", "any")]
        candidate_prefs = CandidatePreferences(
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
        )
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder(
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=True,
            candidate_prefs=candidate_prefs,
            use_deprecated_html5lib=False,
        )

        specifier = SpecifierSet()
        # Pass hashes to check that _hashes is set.
        hashes = Hashes({"sha256": [64 * "a"]})
        evaluator = finder.make_candidate_evaluator(
            "my-project",
            specifier=specifier,
            hashes=hashes,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._hashes == hashes
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._project_name == "my-project"
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [Tag("py36", "none", "any")]
Example #14
    def test_make_candidate_evaluator(
        self,
        allow_all_prereleases,
        prefer_binary,
    ):
        target_python = TargetPython()
        target_python._valid_tags = [('py36', 'none', 'any')]
        candidate_prefs = CandidatePreferences(
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
        )
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder(
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=True,
            candidate_prefs=candidate_prefs,
        )

        specifier = SpecifierSet()
        # Pass hashes to check that _hashes is set.
        hashes = Hashes({'sha256': [64 * 'a']})
        evaluator = finder.make_candidate_evaluator(
            'my-project',
            specifier=specifier,
            hashes=hashes,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._hashes == hashes
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._project_name == 'my-project'
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [('py36', 'none', 'any')]
Example #15
def test_SpecifierRequirement_multiple_specifiers(mocker):
    pkg_src = PackageSource(PipProvider, None)
    pkg_name = 'numpy'
    expected_range_v1 = Range(Version(1, 0, 0), None, True, False)
    expected_range_v2 = Range(None, Version(1, 5, 0), False, False)
    expected_range_v3 = Range(Version(1, 2, 0), None, False, False)
    expected_range_v4 = Range(None, Version(1, 2, 0), False, False)

    expected_constraint = Constraint(
        Package(pkg_name),
        Union(expected_range_v1, expected_range_v2, expected_range_v3,
              expected_range_v4))

    mocker.patch('src.package_source.PackageSource.parse_specifier',
                 side_effect=[[expected_range_v1], [expected_range_v2],
                              [expected_range_v3, expected_range_v4]])

    test_specifier_set = SpecifierSet(">=1.0.0,<1.5.0,!=1.2.0", None)
    test_ireq = my_ireq(my_req(test_specifier_set, pkg_name))
    test_requirement = my_SpecifierRequirement(test_ireq, None)

    result = pkg_src.convert_requirement(test_requirement)

    assert result == expected_constraint
Example #16
class TestCandidateEvaluator:
    @pytest.mark.parametrize('allow_all_prereleases, prefer_binary', [
        (False, False),
        (False, True),
        (True, False),
        (True, True),
    ])
    def test_create(self, allow_all_prereleases, prefer_binary):
        target_python = TargetPython()
        target_python._valid_tags = [('py36', 'none', 'any')]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator.create(
            project_name='my-project',
            target_python=target_python,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
            specifier=specifier,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [('py36', 'none', 'any')]

    def test_create__target_python_none(self):
        """
        Test passing target_python=None.
        """
        evaluator = CandidateEvaluator.create('my-project')
        expected_tags = get_supported()
        assert evaluator._supported_tags == expected_tags

    def test_create__specifier_none(self):
        """
        Test passing specifier=None.
        """
        evaluator = CandidateEvaluator.create('my-project')
        expected_specifier = SpecifierSet()
        assert evaluator._specifier == expected_specifier

    def test_get_applicable_candidates(self):
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert actual == expected_applicable

    @pytest.mark.parametrize(
        'specifier, expected_versions',
        [
            # Test no version constraint.
            (SpecifierSet(), ['1.0', '1.2']),
            # Test a version constraint that excludes the candidate whose
            # hash matches.  Then the non-allowed hash is a candidate.
            (SpecifierSet('<= 1.1'), ['1.0', '1.1']),
        ])
    def test_get_applicable_candidates__hashes(
        self,
        specifier,
        expected_versions,
    ):
        """
        Test a non-None hashes value.
        """
        candidates = [
            make_mock_candidate('1.0'),
            make_mock_candidate('1.1', hex_digest=(64 * 'a')),
            make_mock_candidate('1.2', hex_digest=(64 * 'b')),
        ]
        hashes_data = {
            'sha256': [64 * 'b'],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
            hashes=hashes,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        actual_versions = [str(c.version) for c in actual]
        assert actual_versions == expected_versions

    def test_compute_best_candidate(self):
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert result._applicable_candidates == expected_applicable

        assert result.best_candidate is expected_applicable[1]

    def test_compute_best_candidate__none_best(self):
        """
        Test returning a None best candidate.
        """
        specifier = SpecifierSet('<= 1.10')
        versions = ['1.11', '1.12']
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        assert result._applicable_candidates == []
        assert result.best_candidate is None

    @pytest.mark.parametrize(
        'hex_digest, expected',
        [
            # Test a link with no hash.
            (None, 0),
            # Test a link with an allowed hash.
            (64 * 'a', 1),
            # Test a link with a hash that isn't allowed.
            (64 * 'b', 0),
        ])
    def test_sort_key__hash(self, hex_digest, expected):
        """
        Test the effect of the link's hash on _sort_key()'s return value.
        """
        candidate = make_mock_candidate('1.0', hex_digest=hex_digest)
        hashes_data = {
            'sha256': [64 * 'a'],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create('my-project', hashes=hashes)
        sort_value = evaluator._sort_key(candidate)
        # The hash is reflected in the first element of the tuple.
        actual = sort_value[0]
        assert actual == expected

    @pytest.mark.parametrize(
        'yanked_reason, expected',
        [
            # Test a non-yanked file.
            (None, 0),
            # Test a yanked file (has a lower value than non-yanked).
            ('bad metadata', -1),
        ])
    def test_sort_key__is_yanked(self, yanked_reason, expected):
        """
        Test the effect of is_yanked on _sort_key()'s return value.
        """
        candidate = make_mock_candidate('1.0', yanked_reason=yanked_reason)
        evaluator = CandidateEvaluator.create('my-project')
        sort_value = evaluator._sort_key(candidate)
        # Yanked / non-yanked is reflected in the second element of the tuple.
        actual = sort_value[1]
        assert actual == expected

    def test_sort_best_candidate__no_candidates(self):
        """
        Test passing an empty list.
        """
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate([])
        assert actual is None

    def test_sort_best_candidate__all_yanked(self, caplog):
        """
        Test all candidates yanked.
        """
        candidates = [
            make_mock_candidate('1.0', yanked_reason='bad metadata #1'),
            # Put the best candidate in the middle, to test sorting.
            make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
            make_mock_candidate('2.0', yanked_reason='bad metadata #2'),
        ]
        expected_best = candidates[1]
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate(candidates)
        assert actual is expected_best
        assert str(actual.version) == '3.0'

        # Check the log messages.
        assert len(caplog.records) == 1
        record = caplog.records[0]
        assert record.levelname == 'WARNING'
        assert record.message == (
            'The candidate selected for download or install is a yanked '
            "version: 'mypackage' candidate "
            '(version 3.0 at https://example.com/pkg-3.0.tar.gz)\n'
            'Reason for being yanked: bad metadata #3')

    @pytest.mark.parametrize(
        'yanked_reason, expected_reason',
        [
            # Test no reason given.
            ('', '<none given>'),
            # Test a unicode string with a non-ascii character.
            (u'curly quote: \u2018', u'curly quote: \u2018'),
        ])
    def test_sort_best_candidate__yanked_reason(
        self,
        caplog,
        yanked_reason,
        expected_reason,
    ):
        """
        Test the log message with various reason strings.
        """
        candidates = [
            make_mock_candidate('1.0', yanked_reason=yanked_reason),
        ]
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate(candidates)
        assert str(actual.version) == '1.0'

        assert len(caplog.records) == 1
        record = caplog.records[0]
        assert record.levelname == 'WARNING'
        expected_message = (
            'The candidate selected for download or install is a yanked '
            "version: 'mypackage' candidate "
            '(version 1.0 at https://example.com/pkg-1.0.tar.gz)\n'
            'Reason for being yanked: ') + expected_reason
        assert record.message == expected_message

    def test_sort_best_candidate__best_yanked_but_not_all(
        self,
        caplog,
    ):
        """
        Test the best candidates being yanked, but not all.
        """
        candidates = [
            make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
            # Put the best candidate in the middle, to test sorting.
            make_mock_candidate('2.0'),
            make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
            make_mock_candidate('1.0'),
        ]
        expected_best = candidates[1]
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate(candidates)
        assert actual is expected_best
        assert str(actual.version) == '2.0'

        # Check the log messages.
        assert len(caplog.records) == 0
Example #17
        return self._dist

    def _get_requires_python_dependency(self):
        # type: () -> Optional[Requirement]
        requires_python = get_requires_python(self.dist)
        if requires_python is None:
            return None
        try:
            spec = SpecifierSet(requires_python)
        except InvalidSpecifier as e:
            message = "Package %r has an invalid Requires-Python: %s"
            logger.warning(message, self.name, e)
            return None
        return self._factory.make_requires_python_requirement(spec)

    def iter_dependencies(self, with_requires):
        # type: (bool) -> Iterable[Optional[Requirement]]
        requires = self.dist.requires() if with_requires else ()
        for r in requires:
            yield self._factory.make_requirement_from_spec(str(r), self._ireq)
        yield self._get_requires_python_dependency()
Example #18
def test_new_resolver_correct_number_of_matches(test_cases, factory):
    """Requirements should return the correct number of candidates"""
    for spec, name, matches in test_cases:
        req = factory.make_requirement_from_spec(spec, comes_from=None)
        assert len(req.find_matches(SpecifierSet())) == matches
Example #19
def get_specifier(version_str: Union[SpecifierSet, str]) -> SpecifierSet:
    if isinstance(version_str, SpecifierSet):
        return version_str
    if not version_str or version_str == "*":
        return SpecifierSet()
    return SpecifierSet(version_str)
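For reference, a sketch of the normalisation this helper performs (assertions illustrative):

from packaging.specifiers import SpecifierSet

assert get_specifier("*") == SpecifierSet()      # wildcard means match-all
assert get_specifier("") == SpecifierSet()       # so does an empty string
assert get_specifier(">=1.0") == SpecifierSet(">=1.0")
spec = SpecifierSet("<2.0")
assert get_specifier(spec) is spec               # already parsed: passed through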
Example #20
    def _iter_found_candidates(
        self,
        ireqs: Sequence[InstallRequirement],
        specifier: SpecifierSet,
        hashes: Hashes,
        prefers_installed: bool,
        incompatible_ids: Set[int],
    ) -> Iterable[Candidate]:
        if not ireqs:
            return ()

        # The InstallRequirement implementation requires us to give it a
        # "template". Here we just choose the first requirement to represent
        # all of them.
        # Hopefully the Project model can correct this mismatch in the future.
        template = ireqs[0]
        assert template.req, "Candidates found on index must be PEP 508"
        name = canonicalize_name(template.req.name)

        extras = frozenset()  # type: FrozenSet[str]
        for ireq in ireqs:
            assert ireq.req, "Candidates found on index must be PEP 508"
            specifier &= ireq.req.specifier
            hashes &= ireq.hashes(trust_internet=False)
            extras |= frozenset(ireq.extras)

        # Get the installed version, if it matches, unless the user
        # specified `--force-reinstall`, when we want the version from
        # the index instead.
        installed_candidate = None
        if not self._force_reinstall and name in self._installed_dists:
            installed_dist = self._installed_dists[name]
            if specifier.contains(installed_dist.version, prereleases=True):
                installed_candidate = self._make_candidate_from_dist(
                    dist=installed_dist,
                    extras=extras,
                    template=template,
                )

        def iter_index_candidate_infos():
            # type: () -> Iterator[IndexCandidateInfo]
            result = self._finder.find_best_candidate(
                project_name=name,
                specifier=specifier,
                hashes=hashes,
            )
            icans = list(result.iter_applicable())

            # PEP 592: Yanked releases must be ignored unless only yanked
            # releases can satisfy the version range. So if this is false,
            # all yanked icans need to be skipped.
            all_yanked = all(ican.link.is_yanked for ican in icans)

            # PackageFinder returns earlier versions first, so we reverse.
            for ican in reversed(icans):
                if not all_yanked and ican.link.is_yanked:
                    continue
                func = functools.partial(
                    self._make_candidate_from_link,
                    link=ican.link,
                    extras=extras,
                    template=template,
                    name=name,
                    version=ican.version,
                )
                yield ican.version, func

        return FoundCandidates(
            iter_index_candidate_infos,
            installed_candidate,
            prefers_installed,
            incompatible_ids,
        )
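The specifier &= ireq.req.specifier folding above relies on SpecifierSet.__and__, which intersects constraint sets. A standalone illustration:

from packaging.specifiers import SpecifierSet

combined = SpecifierSet(">=1.0")
for extra in (SpecifierSet("<2.0"), SpecifierSet("!=1.3")):
    combined &= extra  # accumulate constraints from each requirement
assert combined == SpecifierSet(">=1.0,<2.0,!=1.3")
assert combined.contains("1.5")
assert not combined.contains("1.3")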
Example #21
    def find_matches(self, requirement):
        # type: (Requirement) -> Sequence[Candidate]
        constraint = self._constraints.get(requirement.name, SpecifierSet())
        matches = requirement.find_matches(constraint)
        return self._sort_matches(matches)
Example #22
def contains_version(version: str, version_specifier: str) -> bool:
    specifier = SpecifierSet(version_specifier)
    return specifier.contains(version)
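SpecifierSet.contains accepts a plain version string, which keeps this wrapper to one line. Illustrative checks:

assert contains_version("1.4.2", ">=1.4,<2.0")
assert not contains_version("2.0", ">=1.4,<2.0")  # upper bound is exclusive
# Pre-releases are excluded by default unless the specifier opts in.
assert not contains_version("2.0rc1", ">=1.4")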
Example #23
def _check_plugin_version(plugin_module, plugin_name):
    if hasattr(plugin_module, "pyb_version") and plugin_module.pyb_version:
        required_pyb_version = SpecifierSet(plugin_module.pyb_version, True)
        if not required_pyb_version.contains(PYB_VERSION):
            raise IncompatiblePluginException(plugin_name, required_pyb_version, PYB_VERSION)
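The second positional argument in SpecifierSet(plugin_module.pyb_version, True) sets prereleases=True, so pre-release builds of PyBuilder can also satisfy a plugin's version requirement. A standalone illustration:

from packaging.specifiers import SpecifierSet

assert SpecifierSet(">=0.12", prereleases=True).contains("0.13.dev1")
assert not SpecifierSet(">=0.12").contains("0.13.dev1")  # prereleases are off by default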
Example #24
    def version(self, v: str) -> None:
        if not v or v == "*":
            self.specifier = SpecifierSet()
        else:
            self.specifier = get_specifier(f"=={v}")
Example #25
    def empty(cls):
        # type: () -> Constraint
        return Constraint(SpecifierSet(), Hashes())
Example #26
if TYPE_CHECKING:
    from typing import Any
    from typing import Dict
    from typing import Generator
    from typing import List
    from typing import MutableMapping
    from typing import Optional
    from typing import Tuple
    from typing import Union
    from pip._internal.req.req_file import ParsedRequirement

_DEFAULT_INDEX_URLS = ("https://pypi.org/simple", )
_MAX_DIR_TRAVERSAL = 42  # Avoid any symlinks that would loop.
_PIP_BIN = os.getenv("MICROPIPENV_PIP_BIN", "pip")
_SUPPORTED_PIP = SpecifierSet(_SUPPORTED_PIP_STR)
_DEBUG = int(os.getenv("MICROPIPENV_DEBUG", 0))
_NO_LOCKFILE_PRINT = int(os.getenv("MICROPIPENV_NO_LOCKFILE_PRINT", 0))
_NO_LOCKFILE_WRITE = int(os.getenv("MICROPIPENV_NO_LOCKFILE_WRITE", 0))
_FILE_METHOD_MAP = OrderedDict([  # The order here defines priorities
    ("Pipfile.lock", "pipenv"),
    ("poetry.lock", "poetry"),
    ("requirements.txt", "requirements"),
])


class MicropipenvException(Exception):
    """A base class for all micropipenv exceptions."""


class FileNotFound(MicropipenvException):
Example #27
    def empty(cls) -> "Constraint":
        return Constraint(SpecifierSet(), Hashes(), frozenset())
Example #28
class TestCandidateEvaluator:
    @pytest.mark.parametrize(
        "allow_all_prereleases, prefer_binary",
        [
            (False, False),
            (False, True),
            (True, False),
            (True, True),
        ],
    )
    def test_create(self, allow_all_prereleases: bool,
                    prefer_binary: bool) -> None:
        target_python = TargetPython()
        target_python._valid_tags = [Tag("py36", "none", "any")]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator.create(
            project_name="my-project",
            target_python=target_python,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
            specifier=specifier,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [Tag("py36", "none", "any")]

    def test_create__target_python_none(self) -> None:
        """
        Test passing target_python=None.
        """
        evaluator = CandidateEvaluator.create("my-project")
        expected_tags = get_supported()
        assert evaluator._supported_tags == expected_tags

    def test_create__specifier_none(self) -> None:
        """
        Test passing specifier=None.
        """
        evaluator = CandidateEvaluator.create("my-project")
        expected_specifier = SpecifierSet()
        assert evaluator._specifier == expected_specifier

    def test_get_applicable_candidates(self) -> None:
        specifier = SpecifierSet("<= 1.11")
        versions = ["1.10", "1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            "1.10",
            "1.11",
        ]
        assert actual == expected_applicable

    @pytest.mark.parametrize(
        "specifier, expected_versions",
        [
            # Test no version constraint.
            (SpecifierSet(), ["1.0", "1.2"]),
            # Test a version constraint that excludes the candidate whose
            # hash matches.  Then the non-allowed hash is a candidate.
            (SpecifierSet("<= 1.1"), ["1.0", "1.1"]),
        ],
    )
    def test_get_applicable_candidates__hashes(
        self,
        specifier: SpecifierSet,
        expected_versions: List[str],
    ) -> None:
        """
        Test a non-None hashes value.
        """
        candidates = [
            make_mock_candidate("1.0"),
            make_mock_candidate("1.1", hex_digest=(64 * "a")),
            make_mock_candidate("1.2", hex_digest=(64 * "b")),
        ]
        hashes_data = {
            "sha256": [64 * "b"],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
            hashes=hashes,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        actual_versions = [str(c.version) for c in actual]
        assert actual_versions == expected_versions

    def test_compute_best_candidate(self) -> None:
        specifier = SpecifierSet("<= 1.11")
        versions = ["1.10", "1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            "1.10",
            "1.11",
        ]
        assert result._applicable_candidates == expected_applicable

        assert result.best_candidate is expected_applicable[1]

    def test_compute_best_candidate__none_best(self) -> None:
        """
        Test returning a None best candidate.
        """
        specifier = SpecifierSet("<= 1.10")
        versions = ["1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)

        assert result._candidates == candidates
        assert result._applicable_candidates == []
        assert result.best_candidate is None

    @pytest.mark.parametrize(
        "hex_digest, expected",
        [
            # Test a link with no hash.
            (None, 0),
            # Test a link with an allowed hash.
            (64 * "a", 1),
            # Test a link with a hash that isn't allowed.
            (64 * "b", 0),
        ],
    )
    def test_sort_key__hash(self, hex_digest: Optional[str],
                            expected: int) -> None:
        """
        Test the effect of the link's hash on _sort_key()'s return value.
        """
        candidate = make_mock_candidate("1.0", hex_digest=hex_digest)
        hashes_data = {
            "sha256": [64 * "a"],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create("my-project", hashes=hashes)
        sort_value = evaluator._sort_key(candidate)
        # The hash is reflected in the first element of the tuple.
        actual = sort_value[0]
        assert actual == expected

    @pytest.mark.parametrize(
        "yanked_reason, expected",
        [
            # Test a non-yanked file.
            (None, 0),
            # Test a yanked file (has a lower value than non-yanked).
            ("bad metadata", -1),
        ],
    )
    def test_sort_key__is_yanked(self, yanked_reason: Optional[str],
                                 expected: int) -> None:
        """
        Test the effect of is_yanked on _sort_key()'s return value.
        """
        candidate = make_mock_candidate("1.0", yanked_reason=yanked_reason)
        evaluator = CandidateEvaluator.create("my-project")
        sort_value = evaluator._sort_key(candidate)
        # Yanked / non-yanked is reflected in the second element of the tuple.
        actual = sort_value[1]
        assert actual == expected

    def test_sort_best_candidate__no_candidates(self) -> None:
        """
        Test passing an empty list.
        """
        evaluator = CandidateEvaluator.create("my-project")
        actual = evaluator.sort_best_candidate([])
        assert actual is None

    def test_sort_best_candidate__best_yanked_but_not_all(
        self,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """
        Test the best candidates being yanked, but not all.
        """
        caplog.set_level(logging.INFO)
        candidates = [
            make_mock_candidate("4.0", yanked_reason="bad metadata #4"),
            # Put the best candidate in the middle, to test sorting.
            make_mock_candidate("2.0"),
            make_mock_candidate("3.0", yanked_reason="bad metadata #3"),
            make_mock_candidate("1.0"),
        ]
        expected_best = candidates[1]
        evaluator = CandidateEvaluator.create("my-project")
        actual = evaluator.sort_best_candidate(candidates)
        assert actual is expected_best
        assert str(actual.version) == "2.0"

        # Check the log messages.
        assert len(caplog.records) == 0