def test_get_best_candidate__all_yanked(self, caplog):
    """
    Test that a yanked candidate is still selected (with a warning)
    when every available candidate is yanked.
    """
    # The highest version is deliberately not first, to exercise sorting.
    candidates = [
        self.make_mock_candidate('1.0', yanked_reason='bad metadata #1'),
        self.make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
        self.make_mock_candidate('2.0', yanked_reason='bad metadata #2'),
    ]
    evaluator = CandidateEvaluator()
    best = evaluator.get_best_candidate(candidates)
    # The 3.0 candidate (index 1) should win despite being yanked.
    assert best is candidates[1]
    assert str(best.version) == '3.0'
    # Exactly one warning should be logged about installing a yanked version.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == 'WARNING'
    assert record.message == (
        'The candidate selected for download or install is a yanked '
        "version: 'mypackage' candidate "
        '(version 3.0 at https://example.com/pkg-3.0.tar.gz)\n'
        'Reason for being yanked: bad metadata #3'
    )
def test_get_best_candidate__yanked_reason(
    self, caplog, yanked_reason, expected_reason,
):
    """
    Test the log message with various reason strings.
    """
    candidates = [
        self.make_mock_candidate('1.0', yanked_reason=yanked_reason),
    ]
    evaluator = CandidateEvaluator(allow_yanked=True)
    best = evaluator.get_best_candidate(candidates)
    assert str(best.version) == '1.0'
    # A single WARNING record should describe the yanked selection,
    # ending with the (possibly empty) reason string.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == 'WARNING'
    assert record.message == (
        'The candidate selected for download or install is a yanked '
        "version: 'mypackage' candidate "
        '(version 1.0 at https://example.com/pkg-1.0.tar.gz)\n'
        'Reason for being yanked: ') + expected_reason
def test_get_best_candidate__no_candidates(self):
    """
    Test that an empty candidate list yields None.
    """
    evaluator = CandidateEvaluator()
    assert evaluator.get_best_candidate([]) is None
class TestCandidateEvaluator(object):
    """Tests for CandidateEvaluator.get_install_candidate()."""

    # patch this for travis which has distribute in its base env for now
    @patch(
        'pip._internal.wheel.pkg_resources.get_distribution',
        lambda x: Distribution(project_name='setuptools', version='0.9')
    )
    def setup(self):
        # Shared fixture: search for 'pytest' using the running
        # interpreter's supported wheel tags.
        self.version = '1.0'
        self.search_name = 'pytest'
        self.canonical_name = 'pytest'
        valid_tags = pip._internal.pep425tags.get_supported()
        self.evaluator = CandidateEvaluator(valid_tags=valid_tags)

    @pytest.mark.parametrize(
        'url',
        [
            'http:/yo/pytest-1.0.tar.gz',
            'http:/yo/pytest-1.0-py2.py3-none-any.whl',
        ],
    )
    def test_evaluate_link__match(self, url):
        """Test that 'pytest' archives match for 'pytest'"""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # Both the sdist and the wheel should yield a candidate.
        result = self.evaluator.get_install_candidate(link, search)
        expected = InstallationCandidate(self.search_name, self.version, link)
        assert result == expected, result

    @pytest.mark.parametrize(
        'url',
        [
            # TODO: Uncomment this test case when #1217 is fixed.
            # 'http:/yo/pytest-xdist-1.0.tar.gz',
            'http:/yo/pytest2-1.0.tar.gz',
            'http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl',
        ],
    )
    def test_evaluate_link__substring_fails(self, url):
        """Test that 'pytest<something> archives won't match for 'pytest'."""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # A differently-named project must not produce a candidate.
        result = self.evaluator.get_install_candidate(link, search)
        assert result is None, result
class TestCandidateEvaluator(object):
    """Tests for CandidateEvaluator.evaluate_link()."""

    # patch this for travis which has distribute in its base env for now
    @patch(
        'pip._internal.wheel.pkg_resources.get_distribution',
        lambda x: Distribution(project_name='setuptools', version='0.9')
    )
    def setup(self):
        # Shared fixture: search for 'pytest' using the running
        # interpreter's supported wheel tags.
        self.version = '1.0'
        self.search_name = 'pytest'
        self.canonical_name = 'pytest'
        valid_tags = pip._internal.pep425tags.get_supported()
        self.evaluator = CandidateEvaluator(valid_tags=valid_tags)

    @pytest.mark.parametrize(
        'url',
        [
            'http:/yo/pytest-1.0.tar.gz',
            'http:/yo/pytest-1.0-py2.py3-none-any.whl',
        ],
    )
    def test_evaluate_link__match(self, url):
        """Test that 'pytest' archives match for 'pytest'"""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # Both the sdist and the wheel should yield a candidate.
        result = self.evaluator.evaluate_link(link, search)
        expected = InstallationCandidate(self.search_name, self.version, link)
        assert result == expected, result

    @pytest.mark.parametrize(
        'url',
        [
            # TODO: Uncomment this test case when #1217 is fixed.
            # 'http:/yo/pytest-xdist-1.0.tar.gz',
            'http:/yo/pytest2-1.0.tar.gz',
            'http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl',
        ],
    )
    def test_evaluate_link__substring_fails(self, url):
        """Test that 'pytest<something> archives won't match for 'pytest'."""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # A differently-named project must not produce a candidate.
        result = self.evaluator.evaluate_link(link, search)
        assert result is None, result
def test_sort_key__is_yanked(self, yanked_reason, expected):
    """
    Test the effect of is_yanked on _sort_key()'s return value.
    """
    link = Link(
        'https://example.com/mypackage.tar.gz',
        yanked_reason=yanked_reason,
    )
    candidate = InstallationCandidate('mypackage', '1.0', link)
    sort_value = CandidateEvaluator()._sort_key(candidate)
    # The yanked flag occupies the first slot of the sort tuple.
    assert sort_value[0] == expected
def test_make_link_evaluator(
    self, allow_yanked, ignore_requires_python, only_binary,
    expected_formats,
):
    """
    Check that make_link_evaluator() threads the finder's options
    through to the LinkEvaluator it creates.
    """
    # A TargetPython instance we can later check for identity.
    target_python = TargetPython(py_version_info=(3, 7))
    finder = PackageFinder(
        candidate_evaluator=CandidateEvaluator(),
        search_scope=SearchScope([], []),
        session=PipSession(),
        target_python=target_python,
        allow_yanked=allow_yanked,
        format_control=FormatControl(set(), only_binary),
        ignore_requires_python=ignore_requires_python,
    )
    # Use a project_name whose canonical form differs from the input.
    link_evaluator = finder.make_link_evaluator('Twine')
    assert link_evaluator.project_name == 'Twine'
    assert link_evaluator._canonical_name == 'twine'
    assert link_evaluator._allow_yanked == allow_yanked
    assert link_evaluator._ignore_requires_python == ignore_requires_python
    assert link_evaluator._formats == expected_formats
    # The TargetPython should be passed through by identity...
    assert link_evaluator._target_python is target_python
    # ...with its attributes intact (not reset).
    assert link_evaluator._target_python._given_py_version_info == (3, 7)
    assert link_evaluator._target_python.py_version_info == (3, 7, 0)
def test_create__target_python_none(self):
    """
    Test passing target_python=None.
    """
    evaluator = CandidateEvaluator.create('my-project')
    # Without a target, the tags of the running interpreter are used.
    assert evaluator._supported_tags == get_supported()
def test_create__specifier_none(self):
    """
    Test passing specifier=None.
    """
    evaluator = CandidateEvaluator.create('my-project')
    # A missing specifier defaults to the empty (match-all) SpecifierSet.
    assert evaluator._specifier == SpecifierSet()
def test_get_applicable_candidates__hashes(
    self, specifier, expected_versions,
):
    """
    Test a non-None hashes value.
    """
    hashes = Hashes({'sha256': [64 * 'b']})
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=specifier,
        hashes=hashes,
    )
    # Only the 1.2 candidate's digest matches the allowed hash.
    candidates = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1', hex_digest=(64 * 'a')),
        make_mock_candidate('1.2', hex_digest=(64 * 'b')),
    ]
    applicable = evaluator.get_applicable_candidates(candidates)
    assert [str(c.version) for c in applicable] == expected_versions
def test_sort_best_candidate__no_candidates(self):
    """
    Test passing an empty list.
    """
    evaluator = CandidateEvaluator.create('my-project')
    assert evaluator.sort_best_candidate([]) is None
def test_init__py_version_default(self):
    """
    Test the _py_version attribute's default value.
    """
    evaluator = CandidateEvaluator([])
    # sys.version starts with e.g. "3.6.5 ..."; truncating at the second
    # dot gives the expected "major.minor" string.
    second_dot = sys.version.find('.', 2)
    assert evaluator._py_version == sys.version[:second_dot]
class TestCandidateEvaluator(object):
    """Tests for CandidateEvaluator.evaluate_link()'s (bool, detail) result."""

    # patch this for travis which has distribute in its base env for now
    @patch(
        'pip._internal.wheel.pkg_resources.get_distribution',
        lambda x: Distribution(project_name='setuptools', version='0.9')
    )
    def setup(self):
        # Shared fixture: search for 'pytest' using the running
        # interpreter's supported wheel tags.
        self.search_name = 'pytest'
        self.canonical_name = 'pytest'
        valid_tags = pip._internal.pep425tags.get_supported()
        self.evaluator = CandidateEvaluator(valid_tags=valid_tags)

    @pytest.mark.parametrize('url, expected_version', [
        ('http:/yo/pytest-1.0.tar.gz', '1.0'),
        ('http:/yo/pytest-1.0-py2.py3-none-any.whl', '1.0'),
    ])
    def test_evaluate_link__match(self, url, expected_version):
        """Test that 'pytest' archives match for 'pytest'"""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # A match returns (True, version_string).
        actual = self.evaluator.evaluate_link(link, search)
        assert actual == (True, expected_version)

    @pytest.mark.parametrize('url, expected_msg', [
        # TODO: Uncomment this test case when #1217 is fixed.
        # 'http:/yo/pytest-xdist-1.0.tar.gz',
        ('http:/yo/pytest2-1.0.tar.gz',
         'Missing project version for pytest'),
        ('http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl',
         'wrong project name (not pytest)'),
    ])
    def test_evaluate_link__substring_fails(self, url, expected_msg):
        """Test that 'pytest<something> archives won't match for 'pytest'."""
        link = Link(url)
        search = Search(
            supplied=self.search_name,
            canonical=self.canonical_name,
            formats=['source', 'binary'],
        )
        # A rejection returns (False, reason_message).
        actual = self.evaluator.evaluate_link(link, search)
        assert actual == (False, expected_msg)
def test_init__target_python(self):
    """
    Test the target_python argument.
    """
    custom_target = TargetPython(py_version_info=(3, 7, 3))
    evaluator = CandidateEvaluator(target_python=custom_target)
    # The evaluator should hold the exact object it was given, not a copy.
    assert evaluator._target_python is custom_target
def test_evaluate_link__allow_yanked(
    self, yanked_reason, allow_yanked, expected,
):
    """
    Check how allow_yanked interacts with a link's yanked_reason.
    """
    link = Link(
        'https://example.com/#egg=twine-1.12',
        yanked_reason=yanked_reason,
    )
    evaluator = CandidateEvaluator(allow_yanked=allow_yanked)
    search = Search(
        supplied='twine', canonical='twine', formats=['source'],
    )
    assert evaluator.evaluate_link(link, search=search) == expected
def test_init__py_version_info(self, py_version_info, expected_py_version):
    """
    Test the py_version_info argument.
    """
    evaluator = CandidateEvaluator([], py_version_info=py_version_info)
    # The tuple is stored untouched, and the derived dotted version
    # string should match the expectation.
    assert evaluator._py_version_info == py_version_info
    assert evaluator._py_version == expected_py_version
def test_init__target_python_none(self):
    """
    Test passing None for the target_python argument.
    """
    evaluator = CandidateEvaluator(target_python=None)
    # A default TargetPython should be built for the running interpreter.
    default_target = evaluator._target_python
    assert default_target._given_py_version_info is None
    assert default_target.py_version_info == CURRENT_PY_VERSION_INFO
def test_make_found_candidates(self):
    """
    Check that make_found_candidates() wires up the FoundCandidates
    object with the full list, the evaluator, and the applicable subset.
    """
    versions = ['1.10', '1.11', '1.12']
    candidates = [self.make_mock_candidate(v) for v in versions]
    evaluator = CandidateEvaluator()
    found_candidates = evaluator.make_found_candidates(
        candidates, specifier=SpecifierSet('<= 1.11'),
    )
    assert found_candidates._candidates == candidates
    assert found_candidates._evaluator is evaluator
    # Only the versions satisfying "<= 1.11" remain applicable.
    applicable = found_candidates._applicable_candidates
    assert applicable == candidates[:2]
    assert [str(c.version) for c in applicable] == ['1.10', '1.11']
def test_sort_key__is_yanked(self, yanked_reason, expected):
    """
    Test the effect of is_yanked on _sort_key()'s return value.
    """
    candidate = make_mock_candidate('1.0', yanked_reason=yanked_reason)
    evaluator = CandidateEvaluator.create('my-project')
    # The yanked flag occupies the second slot of the sort tuple.
    assert evaluator._sort_key(candidate)[1] == expected
def test_get_best_candidate__best_yanked_but_not_all(self, caplog):
    """
    Test that a non-yanked candidate beats higher yanked versions,
    and that no warning is logged in that case.
    """
    # The winner (2.0) is deliberately in the middle, to exercise
    # sorting; both higher versions are yanked.
    candidates = [
        self.make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
        self.make_mock_candidate('2.0'),
        self.make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
        self.make_mock_candidate('1.0'),
    ]
    evaluator = CandidateEvaluator()
    best = evaluator.get_best_candidate(candidates)
    assert best is candidates[1]
    assert str(best.version) == '2.0'
    # No warning since the chosen candidate is not yanked.
    assert len(caplog.records) == 0
def test_evaluate_link__incompatible_wheel(self):
    """
    Test an incompatible wheel.
    """
    target_python = TargetPython(py_version_info=(3, 6, 4))
    # An empty tag list guarantees no wheel can match.
    target_python._valid_tags = []
    evaluator = CandidateEvaluator(target_python=target_python)
    search = Search(
        supplied='sample', canonical='sample', formats=['binary'],
    )
    link = Link('https://example.com/sample-1.0-py2.py3-none-any.whl')
    actual = evaluator.evaluate_link(link, search=search)
    assert actual == (
        False,
        "none of the wheel's tags match: py2-none-any, py3-none-any",
    )
def test_create(self, allow_all_prereleases, prefer_binary):
    """
    Check that create() forwards its options to the evaluator.
    """
    target_python = TargetPython()
    target_python._valid_tags = [('py36', 'none', 'any')]
    evaluator = CandidateEvaluator.create(
        target_python=target_python,
        allow_all_prereleases=allow_all_prereleases,
        prefer_binary=prefer_binary,
    )
    assert evaluator._supported_tags == [('py36', 'none', 'any')]
    assert evaluator._prefer_binary == prefer_binary
    assert evaluator._allow_all_prereleases == allow_all_prereleases
def test_init__py_version_info_none(self):
    """
    Test passing None for the py_version_info argument.
    """
    evaluator = CandidateEvaluator([], py_version_info=None)
    # Truncate sys.version at the second dot: "3.6.5 ..." -> "3.6".
    second_dot = sys.version.find('.', 2)
    assert evaluator._py_version_info == CURRENT_PY_VERSION_INFO
    assert evaluator._py_version == sys.version[:second_dot]
def test_evaluate_link(
    self, py_version_info, ignore_requires_python, expected,
):
    """
    Check evaluate_link() against a link carrying a Requires-Python
    constraint, under various interpreter versions and ignore flags.
    """
    evaluator = CandidateEvaluator(
        [],
        py_version_info=py_version_info,
        ignore_requires_python=ignore_requires_python,
    )
    link = Link(
        'https://example.com/#egg=twine-1.12',
        requires_python='== 3.6.5',
    )
    search = Search(
        supplied='twine', canonical='twine', formats=['source'],
    )
    assert evaluator.evaluate_link(link, search=search) == expected
def test_sort_key__hash(self, hex_digest, expected):
    """
    Test the effect of the link's hash on _sort_key()'s return value.
    """
    hashes = Hashes({'sha256': [64 * 'a']})
    evaluator = CandidateEvaluator.create('my-project', hashes=hashes)
    candidate = make_mock_candidate('1.0', hex_digest=hex_digest)
    # Hash match / mismatch is reflected in the first tuple element.
    assert evaluator._sort_key(candidate)[0] == expected
def test_get_applicable_candidates(self):
    """
    Check filtering candidates against a version specifier.
    """
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.11'),
    )
    candidates = [
        make_mock_candidate(version)
        for version in ['1.10', '1.11', '1.12']
    ]
    applicable = evaluator.get_applicable_candidates(candidates)
    # Only the versions satisfying "<= 1.11" should survive.
    assert applicable == candidates[:2]
    assert [str(c.version) for c in applicable] == ['1.10', '1.11']
def test_create(self, allow_all_prereleases, prefer_binary):
    """
    Check that create() forwards all of its options to the evaluator.
    """
    target_python = TargetPython()
    target_python._valid_tags = [('py36', 'none', 'any')]
    specifier = SpecifierSet()
    evaluator = CandidateEvaluator.create(
        project_name='my-project',
        target_python=target_python,
        allow_all_prereleases=allow_all_prereleases,
        prefer_binary=prefer_binary,
        specifier=specifier,
    )
    assert evaluator._supported_tags == [('py36', 'none', 'any')]
    # The exact specifier object should be stored, not a copy.
    assert evaluator._specifier is specifier
    assert evaluator._prefer_binary == prefer_binary
    assert evaluator._allow_all_prereleases == allow_all_prereleases
def test_compute_best_candidate__none_best(self):
    """
    Test returning a None best candidate.
    """
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.10'),
    )
    candidates = [
        make_mock_candidate(version) for version in ['1.11', '1.12']
    ]
    result = evaluator.compute_best_candidate(candidates)
    assert result._candidates == candidates
    # Nothing satisfies "<= 1.10": no applicable and no best candidate.
    assert result._applicable_candidates == []
    assert result.best_candidate is None
def test_compute_best_candidate(self):
    """
    Check compute_best_candidate() on a mixed candidate list.
    """
    evaluator = CandidateEvaluator.create(
        'my-project',
        specifier=SpecifierSet('<= 1.11'),
    )
    candidates = [
        make_mock_candidate(version)
        for version in ['1.10', '1.11', '1.12']
    ]
    result = evaluator.compute_best_candidate(candidates)
    assert result._candidates == candidates
    # Only the versions satisfying "<= 1.11" are applicable...
    applicable = result._applicable_candidates
    assert applicable == candidates[:2]
    assert [str(c.version) for c in applicable] == ['1.10', '1.11']
    # ...and the highest applicable version (1.11) wins.
    assert result.best_candidate is candidates[1]
def test_link_sorting(self):
    """
    Test that candidates sort best-first: higher versions beat lower
    ones, and among equal versions wheels rank by tag preference, with
    sdists last.
    """
    # Listed here in the expected best-to-worst order.
    links = [
        InstallationCandidate("simple", "2.0", Link('simple-2.0.tar.gz')),
        InstallationCandidate(
            "simple", "1.0", Link('simple-1.0-pyT-none-TEST.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0-pyT-TEST-any.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0-pyT-none-any.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0.tar.gz'),
        ),
    ]
    # Tags listed from most to least preferred.
    valid_tags = [
        ('pyT', 'none', 'TEST'),
        ('pyT', 'TEST', 'any'),
        ('pyT', 'none', 'any'),
    ]
    evaluator = CandidateEvaluator(
        'my-project',
        supported_tags=valid_tags,
        specifier=SpecifierSet(),
    )
    sort_key = evaluator._sort_key
    # The result must not depend on the input order.
    results = sorted(links, key=sort_key, reverse=True)
    results2 = sorted(reversed(links), key=sort_key, reverse=True)
    assert links == results == results2, results2
def test_link_sorting(self):
    """
    Test that candidates sort best-first: higher versions beat lower
    ones, and among equal versions wheels rank by tag preference, with
    sdists last.
    """
    # Listed here in the expected best-to-worst order.
    links = [
        InstallationCandidate("simple", "2.0", Link('simple-2.0.tar.gz')),
        InstallationCandidate(
            "simple", "1.0", Link('simple-1.0-pyT-none-TEST.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0-pyT-TEST-any.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0-pyT-none-any.whl'),
        ),
        InstallationCandidate(
            "simple", '1.0', Link('simple-1.0.tar.gz'),
        ),
    ]
    # Tags listed from most to least preferred.
    valid_tags = [
        ('pyT', 'none', 'TEST'),
        ('pyT', 'TEST', 'any'),
        ('pyT', 'none', 'any'),
    ]
    target_python = TargetPython()
    target_python._valid_tags = valid_tags
    evaluator = CandidateEvaluator(
        allow_yanked=True,
        target_python=target_python,
    )
    sort_key = evaluator._sort_key
    # The result must not depend on the input order.
    results = sorted(links, key=sort_key, reverse=True)
    results2 = sorted(reversed(links), key=sort_key, reverse=True)
    assert links == results == results2, results2
def setup(self):
    """Create the shared fixture used by the tests in this class."""
    # Expected candidate attributes for a matching 'pytest' archive.
    self.version = '1.0'
    self.search_name = 'pytest'
    self.canonical_name = 'pytest'
    # Wheel tags supported by the running interpreter.
    valid_tags = pip._internal.pep425tags.get_supported()
    self.evaluator = CandidateEvaluator(valid_tags=valid_tags)