def test_create__target_python_none(self):
    """Omitting target_python should fall back to get_supported() tags."""
    default_tags = get_supported()
    candidate_evaluator = CandidateEvaluator.create('my-project')
    assert candidate_evaluator._supported_tags == default_tags
def test_link_sorting_wheels_with_build_tags(self) -> None:
    """Verify build tags affect sorting."""
    # (version, filename) pairs listed from most- to least-preferred.
    wheel_data = [
        ("2.0", "simplewheel-2.0-1-py2.py3-none-any.whl"),
        ("2.0", "simplewheel-2.0-py2.py3-none-any.whl"),
        ("1.0", "simplewheel-1.0-py2.py3-none-any.whl"),
    ]
    links = [
        InstallationCandidate("simplewheel", version, Link(filename))
        for version, filename in wheel_data
    ]
    evaluator = CandidateEvaluator.create("my-project")
    key = evaluator._sort_key
    # Sorting either orientation of the input must reproduce the
    # preferred order above.
    forward = sorted(links, key=key, reverse=True)
    backward = sorted(reversed(links), key=key, reverse=True)
    assert links == forward, forward
    assert links == backward, backward
def test_get_applicable_candidates__hashes(
    self,
    specifier,
    expected_versions,
):
    """
    Filtering with a non-None hashes value should keep only the
    candidates whose digest matches.
    """
    available = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1', hex_digest=(64 * 'a')),
        make_mock_candidate('1.2', hex_digest=(64 * 'b')),
    ]
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=specifier,
        hashes=Hashes({'sha256': [64 * 'b']}),
    )
    applicable = evaluator.get_applicable_candidates(available)
    assert [str(c.version) for c in applicable] == expected_versions
def test_create__specifier_none(self):
    """Omitting specifier should default to an empty SpecifierSet."""
    candidate_evaluator = CandidateEvaluator.create('my-project')
    assert candidate_evaluator._specifier == SpecifierSet()
def test_get_applicable_candidates__hashes(
    self,
    specifier: SpecifierSet,
    expected_versions: List[str],
) -> None:
    """
    Supplying a non-None hashes value should restrict the applicable
    candidates to those with a matching digest.
    """
    matching_digest = 64 * "b"
    candidates = [
        make_mock_candidate("1.0"),
        make_mock_candidate("1.1", hex_digest=(64 * "a")),
        make_mock_candidate("1.2", hex_digest=matching_digest),
    ]
    hashes = Hashes({"sha256": [matching_digest]})
    evaluator = CandidateEvaluator.create(
        "my-project",
        specifier=specifier,
        hashes=hashes,
    )
    kept = evaluator.get_applicable_candidates(candidates)
    kept_versions = [str(candidate.version) for candidate in kept]
    assert kept_versions == expected_versions
def test_sort_best_candidate__no_candidates(self):
    """An empty candidate list should yield no best candidate."""
    evaluator = CandidateEvaluator.create('my-project')
    assert evaluator.sort_best_candidate([]) is None
def test_sort_best_candidate__all_yanked(self, caplog):
    """
    When every candidate is yanked, the best one is still chosen and a
    warning is logged about it.
    """
    candidates = [
        make_mock_candidate('1.0', yanked_reason='bad metadata #1'),
        # The highest version sits in the middle so sorting is exercised.
        make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
        make_mock_candidate('2.0', yanked_reason='bad metadata #2'),
    ]
    evaluator = CandidateEvaluator.create('my-project')
    best = evaluator.sort_best_candidate(candidates)
    assert best is candidates[1]
    assert str(best.version) == '3.0'
    # Exactly one warning should have been emitted for the yanked pick.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == 'WARNING'
    assert record.message == (
        'The candidate selected for download or install is a yanked '
        "version: 'mypackage' candidate "
        '(version 3.0 at https://example.com/pkg-3.0.tar.gz)\n'
        'Reason for being yanked: bad metadata #3'
    )
def test_sort_best_candidate__yanked_reason(
    self,
    caplog,
    yanked_reason,
    expected_reason,
):
    """
    The yanked-candidate warning should end with the expected reason
    string for various yanked_reason inputs.
    """
    candidates = [make_mock_candidate('1.0', yanked_reason=yanked_reason)]
    evaluator = CandidateEvaluator.create('my-project')
    best = evaluator.sort_best_candidate(candidates)
    assert str(best.version) == '1.0'
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == 'WARNING'
    prefix = (
        'The candidate selected for download or install is a yanked '
        "version: 'mypackage' candidate "
        '(version 1.0 at https://example.com/pkg-1.0.tar.gz)\n'
        'Reason for being yanked: '
    )
    assert record.message == prefix + expected_reason
def test_sort_key__is_yanked(self, yanked_reason, expected):
    """
    _sort_key() should encode yanked-ness in element [1] of its tuple.
    """
    evaluator = CandidateEvaluator.create('my-project')
    candidate = make_mock_candidate('1.0', yanked_reason=yanked_reason)
    key = evaluator._sort_key(candidate)
    assert key[1] == expected
def test_sort_key__is_yanked(self, yanked_reason: Optional[str], expected: int) -> None:
    """
    _sort_key() should reflect yanked-ness in the second tuple element.
    """
    evaluator = CandidateEvaluator.create("my-project")
    candidate = make_mock_candidate("1.0", yanked_reason=yanked_reason)
    _, yanked_component, *_rest = evaluator._sort_key(candidate)
    assert yanked_component == expected
def test_sort_key__hash(self, hex_digest: Optional[str], expected: int) -> None:
    """
    _sort_key() should reflect the link's hash match in the first tuple
    element.
    """
    hashes = Hashes({"sha256": [64 * "a"]})
    evaluator = CandidateEvaluator.create("my-project", hashes=hashes)
    candidate = make_mock_candidate("1.0", hex_digest=hex_digest)
    hash_component = evaluator._sort_key(candidate)[0]
    assert hash_component == expected
def test_sort_key__hash(self, hex_digest, expected):
    """
    The first element of _sort_key()'s tuple should reflect whether the
    link's hash matches the allowed hashes.
    """
    evaluator = CandidateEvaluator.create(
        'my-project',
        hashes=Hashes({'sha256': [64 * 'a']}),
    )
    candidate = make_mock_candidate('1.0', hex_digest=hex_digest)
    key = evaluator._sort_key(candidate)
    assert key[0] == expected
def test_get_applicable_candidates(self) -> None:
    """Only candidates satisfying the specifier should be kept."""
    candidates = [
        make_mock_candidate(version) for version in ("1.10", "1.11", "1.12")
    ]
    evaluator = CandidateEvaluator.create(
        "my-project",
        specifier=SpecifierSet("<= 1.11"),
    )
    kept = evaluator.get_applicable_candidates(candidates)
    # Sanity-check which versions we expect to survive the filter.
    expected = candidates[:2]
    assert [str(c.version) for c in expected] == ["1.10", "1.11"]
    assert kept == expected
def test_create(self, allow_all_prereleases, prefer_binary):
    """
    Test that create() passes its arguments through to the evaluator,
    and that the target_python's valid tags become the supported tags.
    """
    target_python = TargetPython()
    # Use Tag objects, consistent with what TargetPython._valid_tags
    # actually holds (and with the typed duplicate of this test).
    target_python._valid_tags = [Tag('py36', 'none', 'any')]
    specifier = SpecifierSet()
    evaluator = CandidateEvaluator.create(
        project_name='my-project',
        target_python=target_python,
        allow_all_prereleases=allow_all_prereleases,
        prefer_binary=prefer_binary,
        specifier=specifier,
    )
    assert evaluator._allow_all_prereleases == allow_all_prereleases
    assert evaluator._prefer_binary == prefer_binary
    # The specifier object itself (not a copy) should be stored.
    assert evaluator._specifier is specifier
    assert evaluator._supported_tags == [Tag('py36', 'none', 'any')]
def test_create(self, allow_all_prereleases: bool, prefer_binary: bool) -> None:
    """
    create() should thread its keyword arguments through to the
    resulting evaluator and adopt the target_python's valid tags.
    """
    tags = [Tag("py36", "none", "any")]
    target_python = TargetPython()
    target_python._valid_tags = tags
    specifier = SpecifierSet()
    evaluator = CandidateEvaluator.create(
        project_name="my-project",
        target_python=target_python,
        allow_all_prereleases=allow_all_prereleases,
        prefer_binary=prefer_binary,
        specifier=specifier,
    )
    assert evaluator._supported_tags == [Tag("py36", "none", "any")]
    # The same specifier object (not an equal copy) must be stored.
    assert evaluator._specifier is specifier
    assert evaluator._prefer_binary == prefer_binary
    assert evaluator._allow_all_prereleases == allow_all_prereleases
def test_get_applicable_candidates(self):
    """Candidates outside the specifier range should be dropped."""
    all_versions = ['1.10', '1.11', '1.12']
    candidates = [make_mock_candidate(v) for v in all_versions]
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.11'),
    )
    applicable = evaluator.get_applicable_candidates(candidates)
    expected = candidates[:2]
    # Confirm the slice covers exactly the in-range versions.
    assert [str(c.version) for c in expected] == ['1.10', '1.11']
    assert applicable == expected
def test_compute_best_candidate__none_best(self) -> None:
    """
    When no candidate satisfies the specifier, best_candidate is None.
    """
    candidates = [make_mock_candidate(v) for v in ("1.11", "1.12")]
    evaluator = CandidateEvaluator.create(
        "my-project",
        specifier=SpecifierSet("<= 1.10"),
    )
    outcome = evaluator.compute_best_candidate(candidates)
    assert outcome.best_candidate is None
    assert outcome._applicable_candidates == []
    assert outcome._candidates == candidates
def test_compute_best_candidate__none_best(self):
    """
    With an unsatisfiable specifier the result should record the input
    candidates but report no applicable or best candidate.
    """
    candidates = [make_mock_candidate(v) for v in ['1.11', '1.12']]
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.10'),
    )
    result = evaluator.compute_best_candidate(candidates)
    assert result._candidates == candidates
    assert result._applicable_candidates == []
    assert result.best_candidate is None
def test_link_sorting(self):
    """
    Candidates should sort by version first, then by tag preference,
    with sdists ranking below wheels.
    """
    # Listed from most- to least-preferred.
    links = [
        InstallationCandidate('simple', '2.0', Link('simple-2.0.tar.gz')),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-pyT-none-TEST.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-pyT-TEST-any.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-pyT-none-any.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0.tar.gz'),
        ),
    ]
    evaluator = CandidateEvaluator(
        'my-project',
        supported_tags=[
            Tag('pyT', 'none', 'TEST'),
            Tag('pyT', 'TEST', 'any'),
            Tag('pyT', 'none', 'any'),
        ],
        specifier=SpecifierSet(),
    )
    key = evaluator._sort_key
    forward = sorted(links, key=key, reverse=True)
    backward = sorted(reversed(links), key=key, reverse=True)
    assert links == forward, forward
    assert links == backward, backward
def test_link_sorting(self) -> None:
    """
    Verify that sorting ranks newer versions above older ones and more
    preferred tags above less preferred ones, regardless of input order.
    """
    filenames_by_rank = [
        ("2.0", "simple-2.0.tar.gz"),
        ("1.0", "simple-1.0-pyT-none-TEST.whl"),
        ("1.0", "simple-1.0-pyT-TEST-any.whl"),
        ("1.0", "simple-1.0-pyT-none-any.whl"),
        ("1.0", "simple-1.0.tar.gz"),
    ]
    links = [
        InstallationCandidate("simple", version, Link(name))
        for version, name in filenames_by_rank
    ]
    evaluator = CandidateEvaluator(
        "my-project",
        supported_tags=[
            Tag("pyT", "none", "TEST"),
            Tag("pyT", "TEST", "any"),
            Tag("pyT", "none", "any"),
        ],
        specifier=SpecifierSet(),
    )
    key = evaluator._sort_key
    results = sorted(links, key=key, reverse=True)
    results2 = sorted(reversed(links), key=key, reverse=True)
    assert links == results, results
    assert links == results2, results2
def test_compute_best_candidate(self) -> None:
    """
    compute_best_candidate() should record the inputs, the filtered
    candidates, and pick the highest applicable version as best.
    """
    candidates = [
        make_mock_candidate(version) for version in ("1.10", "1.11", "1.12")
    ]
    evaluator = CandidateEvaluator.create(
        "my-project",
        specifier=SpecifierSet("<= 1.11"),
    )
    outcome = evaluator.compute_best_candidate(candidates)
    applicable = candidates[:2]
    assert [str(c.version) for c in applicable] == ["1.10", "1.11"]
    assert outcome._candidates == candidates
    assert outcome._applicable_candidates == applicable
    # The highest in-range version wins.
    assert outcome.best_candidate is applicable[1]
def test_compute_best_candidate(self):
    """
    The result should expose the original candidates, the applicable
    subset, and the highest applicable version as best_candidate.
    """
    candidates = [make_mock_candidate(v) for v in ['1.10', '1.11', '1.12']]
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.11'),
    )
    result = evaluator.compute_best_candidate(candidates)
    in_range = candidates[:2]
    assert [str(c.version) for c in in_range] == ['1.10', '1.11']
    assert result._candidates == candidates
    assert result._applicable_candidates == in_range
    assert result.best_candidate is in_range[1]
def test_build_tag_is_less_important_than_other_tags(self) -> None:
    """
    A higher build tag must not outrank a more-preferred platform tag.
    """
    # Listed from most- to least-preferred; note the build tags (-1, -2)
    # deliberately conflict with the tag preference order.
    ranked_filenames = [
        "simple-1.0-1-py3-abi3-linux_x86_64.whl",
        "simple-1.0-2-py3-abi3-linux_i386.whl",
        "simple-1.0-2-py3-any-none.whl",
        "simple-1.0.tar.gz",
    ]
    links = [
        InstallationCandidate("simple", "1.0", Link(name))
        for name in ranked_filenames
    ]
    evaluator = CandidateEvaluator(
        "my-project",
        supported_tags=[
            Tag("py3", "abi3", "linux_x86_64"),
            Tag("py3", "abi3", "linux_i386"),
            Tag("py3", "any", "none"),
        ],
        specifier=SpecifierSet(),
    )
    key = evaluator._sort_key
    results = sorted(links, key=key, reverse=True)
    results2 = sorted(reversed(links), key=key, reverse=True)
    assert links == results, results
    assert links == results2, results2
def test_build_tag_is_less_important_than_other_tags(self):
    """
    Tag preference should dominate the build tag when sorting wheels.
    """
    links = [
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-1-py3-abi3-linux_x86_64.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-2-py3-abi3-linux_i386.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0-2-py3-any-none.whl'),
        ),
        InstallationCandidate(
            'simple', '1.0', Link('simple-1.0.tar.gz'),
        ),
    ]
    evaluator = CandidateEvaluator(
        'my-project',
        supported_tags=[
            Tag('py3', 'abi3', 'linux_x86_64'),
            Tag('py3', 'abi3', 'linux_i386'),
            Tag('py3', 'any', 'none'),
        ],
        specifier=SpecifierSet(),
    )
    key = evaluator._sort_key
    forward = sorted(links, key=key, reverse=True)
    backward = sorted(reversed(links), key=key, reverse=True)
    assert links == forward, forward
    assert links == backward, backward
def test_sort_best_candidate__best_yanked_but_not_all(
    self,
    caplog,
):
    """
    If any non-yanked candidate exists, the best non-yanked one should
    win and no warning should be logged.
    """
    caplog.set_level(logging.INFO)
    candidates = [
        make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
        # The best non-yanked candidate sits in the middle to exercise
        # sorting.
        make_mock_candidate('2.0'),
        make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
        make_mock_candidate('1.0'),
    ]
    evaluator = CandidateEvaluator.create('my-project')
    best = evaluator.sort_best_candidate(candidates)
    assert best is candidates[1]
    assert str(best.version) == '2.0'
    # No yanked-candidate warning should have been emitted.
    assert len(caplog.records) == 0
def test_sort_best_candidate__best_yanked_but_not_all(
    self,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """
    Yanked candidates should lose to any non-yanked one, and picking a
    non-yanked candidate should log nothing.
    """
    caplog.set_level(logging.INFO)
    candidates = [
        make_mock_candidate("4.0", yanked_reason="bad metadata #4"),
        # The winning (non-yanked) candidate is deliberately not first.
        make_mock_candidate("2.0"),
        make_mock_candidate("3.0", yanked_reason="bad metadata #3"),
        make_mock_candidate("1.0"),
    ]
    evaluator = CandidateEvaluator.create("my-project")
    chosen = evaluator.sort_best_candidate(candidates)
    assert str(chosen.version) == "2.0"
    assert chosen is candidates[1]
    assert not caplog.records