def testSimpleRequirements(self):
    """Parsing round-trips simple requirement specs and rejects malformed ones."""
    assert (
        list(parse_requirements('Twis-Ted>=1.2-1'))
        == [Requirement('Twis-Ted>=1.2-1')]
    )
    # A backslash continuation followed by a comment folds into one spec.
    assert (
        list(parse_requirements('Twisted >=1.2, \\ # more\n<2.0'))
        == [Requirement('Twisted>=1.2,<2.0')]
    )
    assert (
        Requirement.parse("FooBar==1.99a3")
        == Requirement("FooBar==1.99a3")
    )
    # Malformed requirement strings must raise ValueError.
    with pytest.raises(ValueError):
        Requirement.parse(">=2.3")          # missing project name
    with pytest.raises(ValueError):
        Requirement.parse("x\\")            # dangling continuation
    with pytest.raises(ValueError):
        Requirement.parse("x==2 q")         # trailing junk
    with pytest.raises(ValueError):
        Requirement.parse("X==1\nY==2")     # more than one requirement
    with pytest.raises(ValueError):
        Requirement.parse("#")              # comment only
def _parse(text):
    """
    Generate parsed requirements from `text`, which should contain newline
    separated requirement specs.

    - Additionally looks for "#import name" comments after requirement lines
      (the actual root module name of the required package to use for runtime
      dependency checks) and stores them as .impname attrs on the Requirement
      instances.

    - Supports #py.. tags at the beginning of lines, specifying a python
      version the requirement applies to.
    """
    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue
        # '#pyXY rest-of-line' restricts the requirement to python X.Y.
        match = re.match(r'^#py([0-9]+) (.*)$', line)
        if match: #==> only required in given python version
            pyver, line = match.groups()
            # Compares against e.g. '310' via string prefix matching.
            #TODO: # if len(pyver) > 2:
            if not ('%s%s' % sys.version_info[:2]).startswith(pyver):
                continue
        try:
            # A trailing '#import modname' names the importable root module.
            req, impname = line.split('#import')
        except ValueError:
            # No '#import' tag: fall back to the project name itself.
            req = next(parse_requirements(line), None)
            impname = req and req.unsafe_name
        else:
            req = next(parse_requirements(req), None)
        if not req: # maybe a comment line
            continue
        req.impname = impname.strip()
        yield req
def main(requirements_filename: str) -> int:
    """Compare a requirements file against `pip freeze`.

    Returns a bitmask exit code: 1 for duplicate requirements, +2 when a
    required package is not installed, +4 when an installed package is not
    required (and not whitelisted).
    """
    with open(requirements_filename, 'r') as requirements_file:
        requirement_strings = [_process_line(line) for line in requirements_file]
    requirements = list(pkg_resources.parse_requirements(requirement_strings))
    logging.info('Loaded %d requirements from %s.', len(requirements),
                 requirements_filename)
    duplicates = _duplicates([r.key for r in requirements])
    if duplicates:
        logging.critical('Duplicate requirements: %s', duplicates)
        return 1
    # BUGFIX: use splitlines() rather than split(). `pip freeze` can emit
    # lines containing spaces (e.g. "name @ file:///path" for direct-URL
    # installs), which whitespace split() would mangle into bogus entries.
    frozen_lines = [l.decode() for l in subprocess.check_output(
        [sys.executable, '-m', 'pip', 'freeze']).splitlines()]
    frozen_requirements = list(pkg_resources.parse_requirements(frozen_lines))
    logging.info('Loaded %d requirements from `pip freeze`',
                 len(frozen_requirements))
    required_but_not_present = set(requirements).difference(set(frozen_requirements))
    present_but_not_required = [r for r in
                                set(frozen_requirements).difference(set(requirements))
                                if r.key not in _IGNORED_WHITELIST]
    rc = 0
    if required_but_not_present:
        logging.error('Required but not currently present: %s',
                      required_but_not_present)
        logging.error('Please investigate why a required module was not installed.')
        rc += 2
    if present_but_not_required:
        logging.error('Present but not required: %s', present_but_not_required)
        logging.error('Add these to requirements_frozen.txt or ignored whitelist.')
        rc += 4
    return rc
def test_extras(zfg, zfg_path, in_repo, in_site_packages):
    """Package __extras__ must mirror zfg.EXTRAS and the requirements.*.txt files."""
    for extra in zetup.__extras__:
        assert zetup.__extras__[extra] == zfg.EXTRAS[extra]
        if in_repo:
            # Only verifiable against the files when running from a checkout.
            assert list(parse_requirements(str(zetup.__extras__[extra]))) == list(
                parse_requirements((zfg_path / ("requirements.%s.txt" % extra)).text())
            )
def test_lt_lte(self):
    """'< x' combined with '<= y' never conflicts, in either order."""
    cases = [
        (['a < 0.1', 'a <= 0.1'], {'a': {('<', '0.1'), ('<=', '0.1')}}),
        (['a < 0.1', 'a <= 0.2'], {'a': {('<', '0.1'), ('<=', '0.2')}}),
    ]
    for lines, specs in cases:
        reqs = list(parse_requirements(lines))
        self.run_no_conflict_test(reqs, specs)
        self.run_no_conflict_test(reverse(reqs), specs)
def test_gte_gte(self):
    """Two '>=' bounds never conflict, whatever the versions."""
    cases = [
        # Identical bounds collapse to a single spec.
        (['a >= 0.1', 'a >= 0.1'], {'a': {('>=', '0.1')}}),
        (['a >= 0.1', 'a >= 0.2'], {'a': {('>=', '0.1'), ('>=', '0.2')}}),
    ]
    for lines, specs in cases:
        reqs = list(parse_requirements(lines))
        self.run_no_conflict_test(reqs, specs)
        self.run_no_conflict_test(reverse(reqs), specs)
def test_eqs_lte(self):
    """'== x' conflicts with '<= y' only when x > y."""
    reqs = list(parse_requirements(['a == 0.2', 'a <= 0.1']))
    conflict = {'a': [('==', '0.2'), ('<=', '0.1')]}
    self.run_conflict_test(reqs, conflict)
    self.run_conflict_test_reversed(reqs, conflict)
    # Equal versions satisfy both constraints: no conflict.
    reqs = list(parse_requirements(['a == 0.1', 'a <= 0.1']))
    specs = {'a': {('==', '0.1'), ('<=', '0.1')}}
    self.run_no_conflict_test(reqs, specs)
    self.run_no_conflict_test(reverse(reqs), specs)
def test_eq_gte(self):
    """'== x' conflicts with '>= y' only when x < y."""
    reqs = list(parse_requirements(['a == 0.1', 'a >= 0.2']))
    conflict = {'a': [('==', '0.1'), ('>=', '0.2')]}
    self.run_conflict_test(reqs, conflict)
    self.run_conflict_test_reversed(reqs, conflict)
    # Equal versions satisfy both constraints: no conflict.
    reqs = list(parse_requirements(['a == 0.1', 'a >= 0.1']))
    specs = {'a': {('==', '0.1'), ('>=', '0.1')}}
    self.run_no_conflict_test(reqs, specs)
    self.run_no_conflict_test(reverse(reqs), specs)
def test_ne(self):
    """'!=' never conflicts with an inequality operator (== is tested above)."""
    for op in ('<', '>', '<=', '>='):
        for other in ('0.1', '0.2'):
            reqs = list(parse_requirements(['a != 0.1', 'a %s %s' % (op, other)]))
            specs = {'a': {('!=', '0.1'), (op, other)}}
            self.run_no_conflict_test(reqs, specs)
            self.run_no_conflict_test(reverse(reqs), specs)
def parse(cls, s, lineno):
    """Build an instance from a single requirement line `s` at `lineno`."""
    # setuptools requires a space before an inline comment; insert one
    # after a tab when it is missing.
    text = s.replace("\t#", "\t #") if "\t#" in s else s
    parsed, = parse_requirements(text)
    return cls(
        name=parsed.project_name,
        specs=parsed.specs,
        line=s,
        lineno=lineno,
        hashCmp=parsed.hashCmp,
        extras=parsed.extras,
    )
def test_environment_marker_evaluation_called(self):
    """
    If one package foo requires bar without any extras, markers
    should pass for bar without extras.
    """
    parent_req, = parse_requirements("foo")
    req, = parse_requirements("bar;python_version>='2'")
    req_extras = pkg_resources._ReqExtras({req: parent_req.extras})
    assert req_extras.markers_pass(req)
    # The same holds when the parent explicitly lists empty extras 'foo[]'.
    parent_req, = parse_requirements("foo[]")
    req, = parse_requirements("bar;python_version>='2'")
    req_extras = pkg_resources._ReqExtras({req: parent_req.extras})
    assert req_extras.markers_pass(req)
def test_lt_gte(self):
    """'< x' always conflicts with '>= y' when x <= y."""
    for hi in ('0.1', '0.2'):
        reqs = list(parse_requirements(['a < 0.1', 'a >= %s' % hi]))
        conflict = {'a': [('<', '0.1'), ('>=', hi)]}
        self.run_conflict_test(reqs, conflict)
        self.run_conflict_test_reversed(reqs, conflict)
def test_gt_lte(self):
    """'> x' always conflicts with '<= y' when x >= y."""
    for lo in ('0.1', '0.2'):
        reqs = list(parse_requirements(['a > %s' % lo, 'a <= 0.1']))
        conflict = {'a': [('>', lo), ('<=', '0.1')]}
        self.run_conflict_test(reqs, conflict)
        self.run_conflict_test_reversed(reqs, conflict)
def test_eqs_gt(self):
    """'== x' always conflicts with '> y' when x <= y."""
    for bound in ('0.1', '0.2'):
        reqs = list(parse_requirements(['a == 0.1', 'a > %s' % bound]))
        conflict = {'a': [('==', '0.1'), ('>', bound)]}
        self.run_conflict_test(reqs, conflict)
        self.run_conflict_test_reversed(reqs, conflict)
def test_marker_evaluation_with_extras_normlized(self):
    """Extras are also evaluated as markers at resolution time."""
    ad = pkg_resources.Environment([])
    ws = WorkingSet([])
    # Foo only requires quux when the 'baz-lightyear' extra is requested.
    Foo = Distribution.from_filename(
        "/foo_dir/Foo-1.2.dist-info",
        metadata=Metadata(("METADATA", "Provides-Extra: baz-lightyear\n"
                           "Requires-Dist: quux; extra=='baz-lightyear'"))
    )
    ad.add(Foo)
    # Plain 'Foo' resolves without pulling in the extra dependency.
    assert list(ws.resolve(parse_requirements("Foo"), ad)) == [Foo]
    quux = Distribution.from_filename("/foo_dir/quux-1.0.dist-info")
    ad.add(quux)
    # Requesting the extra (with normalized name) pulls in quux as well.
    res = list(ws.resolve(parse_requirements("Foo[baz-lightyear]"), ad))
    assert res == [Foo, quux]
def _move_install_requirements_markers(self): """ Move requirements in `install_requires` that are using environment markers `extras_require`. """ # divide the install_requires into two sets, simple ones still # handled by install_requires and more complex ones handled # by extras_require. def is_simple_req(req): return not req.marker spec_inst_reqs = getattr(self, 'install_requires', None) or () inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) simple_reqs = filter(is_simple_req, inst_reqs) complex_reqs = filterfalse(is_simple_req, inst_reqs) self.install_requires = list(map(str, simple_reqs)) for r in complex_reqs: self._tmp_extras_require[':' + str(r.marker)].append(r) self.extras_require = dict( (k, [str(r) for r in map(self._clean_req, v)]) for k, v in self._tmp_extras_require.items() )
def test_resolve_conflicts_with_prior(self): """ A ContextualVersionConflict should be raised when a requirement conflicts with a prior requirement for a different package. """ # Create installation where Foo depends on Baz 1.0 and Bar depends on # Baz 2.0. ws = WorkingSet([]) md = Metadata(('depends.txt', "Baz==1.0")) Foo = Distribution.from_filename("/foo_dir/Foo-1.0.egg", metadata=md) ws.add(Foo) md = Metadata(('depends.txt', "Baz==2.0")) Bar = Distribution.from_filename("/foo_dir/Bar-1.0.egg", metadata=md) ws.add(Bar) Baz = Distribution.from_filename("/foo_dir/Baz-1.0.egg") ws.add(Baz) Baz = Distribution.from_filename("/foo_dir/Baz-2.0.egg") ws.add(Baz) with pytest.raises(VersionConflict) as vc: ws.resolve(parse_requirements("Foo\nBar\n")) msg = "Baz 1.0 is installed but Baz==2.0 is required by " msg += repr(set(['Bar'])) assert vc.value.report() == msg
def fetch_build_eggs(self, requires):
    """Resolve pre-setup requirements"""
    parsed = pkg_resources.parse_requirements(requires)
    # Resolve (downloading via fetch_build_egg as needed), replacing any
    # conflicting dists already in the working set.
    dists = pkg_resources.working_set.resolve(
        parsed,
        installer=self.fetch_build_egg,
        replace_conflicting=True,
    )
    for dist in dists:
        pkg_resources.working_set.add(dist, replace=True)
def parametrize_test_working_set_resolve(*test_list):
    """Turn textual resolve scenarios into a pytest.mark.parametrize decorator.

    Each `test` is a blank-line-separated spec of: name, installed dists,
    installable dists, requirements, expected result without and with
    `replace_conflicting`. An expected value that is a bare word names a
    pkg_resources exception class; otherwise it is parsed as distributions.
    """
    idlist = []
    argvalues = []
    for test in test_list:
        (
            name,
            installed_dists,
            installable_dists,
            requirements,
            expected1,
            expected2
        ) = [
            strip_comments(s.lstrip())
            for s in textwrap.dedent(test).lstrip().split('\n\n', 5)
        ]
        installed_dists = list(parse_distributions(installed_dists))
        installable_dists = list(parse_distributions(installable_dists))
        requirements = list(pkg_resources.parse_requirements(requirements))
        for id_, replace_conflicting, expected in (
            (name, False, expected1),
            (name + '_replace_conflicting', True, expected2),
        ):
            idlist.append(id_)
            expected = strip_comments(expected.strip())
            # BUGFIX: raw string — '\w' in a plain literal is an invalid
            # escape sequence (SyntaxWarning, an error in newer Pythons).
            if re.match(r'\w+$', expected):
                # A bare identifier names an exception on pkg_resources.
                expected = getattr(pkg_resources, expected)
                assert issubclass(expected, Exception)
            else:
                expected = list(parse_distributions(expected))
            argvalues.append(pytest.param(installed_dists, installable_dists,
                                          requirements, replace_conflicting,
                                          expected))
    return pytest.mark.parametrize('installed_dists,installable_dists,'
                                   'requirements,replace_conflicting,'
                                   'resolved_dists_or_exception',
                                   argvalues, ids=idlist)
def fetch_build_eggs(self, requires):
    """Resolve pre-setup requirements"""
    from pkg_resources import working_set, parse_requirements
    resolved = working_set.resolve(
        parse_requirements(requires),
        installer=self.fetch_build_egg,
    )
    # Register every resolved distribution with the global working set.
    for dist in resolved:
        working_set.add(dist)
def test_environment_marker_evaluation_positive(self):
    """A requirement whose environment marker evaluates true resolves normally."""
    ad = pkg_resources.Environment([])
    ws = WorkingSet([])
    Foo = Distribution.from_filename("/foo_dir/Foo-1.2.dist-info")
    ad.add(Foo)
    # python_version>='2' is true on any supported interpreter.
    res = ws.resolve(parse_requirements("Foo;python_version>='2'"), ad)
    assert list(res) == [Foo]
def _check(self, requirement, expected):
    """Constrain `self.i` by the parsed `requirement` and compare the result.

    `expected` is the string form the constrained requirement should have.
    """
    result = _constrain(self.i, tuple(parse_requirements(requirement))[0])
    # assertIsInstance replaces the long-deprecated failUnless alias and
    # produces a clearer failure message.
    self.assertIsInstance(result, Requirement)
    compare(expected, str(result))
def get_deb_depends_from_setuptools_requires(requirements): depends = [] # This will be the return value from this function. parsed_reqs=[] for extra,reqs in pkg_resources.split_sections(requirements): if extra: continue parsed_reqs.extend(pkg_resources.parse_requirements(reqs)) if not parsed_reqs: return depends if not os.path.exists('/usr/bin/apt-file'): raise ValueError('apt-file not in /usr/bin. Please install ' 'with: sudo apt-get install apt-file') # Ask apt-file for any packages which have a .egg-info file by # these names. # Note that apt-file appears to think that some packages # e.g. setuptools itself have "foo.egg-info/BLAH" files but not a # "foo.egg-info" directory. egginfore=("(/(%s)(?:-[^/]+)?(?:-py[0-9]\.[0-9.]+)?\.egg-info)" % '|'.join(req.project_name for req in parsed_reqs)) args = ["apt-file", "search", "--ignore-case", "--regexp", egginfore] try: cmd = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) except Exception, le: log.error('ERROR running: %s', ' '.join(args)) raise RuntimeError('exception %s from subprocess %s' % (le,args))
def test_marker_evaluation_with_extras_loop(self):
    """Extras that chain across distributions are resolved transitively."""
    ad = pkg_resources.Environment([])
    ws = WorkingSet([])
    # Metadata needs to be native strings due to cStringIO behaviour in
    # 2.6, so use str().
    # Dependency chain: a -> c[a] -> b -> c[b] -> foo (a loop through the
    # extras of c).
    a = Distribution.from_filename(
        "/foo_dir/a-0.2.dist-info",
        metadata=Metadata(("METADATA", str("Requires-Dist: c[a]")))
    )
    b = Distribution.from_filename(
        "/foo_dir/b-0.3.dist-info",
        metadata=Metadata(("METADATA", str("Requires-Dist: c[b]")))
    )
    c = Distribution.from_filename(
        "/foo_dir/c-1.0.dist-info",
        metadata=Metadata(("METADATA", str("Provides-Extra: a\n"
                           "Requires-Dist: b;extra=='a'\n"
                           "Provides-Extra: b\n"
                           "Requires-Dist: foo;extra=='b'")))
    )
    foo = Distribution.from_filename("/foo_dir/foo-0.1.dist-info")
    for dist in (a, b, c, foo):
        ad.add(dist)
    res = list(ws.resolve(parse_requirements("a"), ad))
    assert res == [a, c, b, foo]
def install_puppet_modules():
    """Install or upgrade puppet modules listed in MODULES_FILE_PATH.

    Module names use '/' in the file but '-' in requirement syntax, hence
    the replace. Version comparison is delegated to apt_pkg.
    """
    modules_installed = get_modules_installed()
    with open(MODULES_FILE_PATH) as modules_file:
        modules_requirements = modules_file.read().replace("/", "-")
    for module in parse_requirements(modules_requirements):
        current_cmd, compare, version, version_comparison = "", "", "", None
        if module.project_name in modules_installed:
            if module.specs:
                compare, version = module.specs[0]
                version_comparison = apt_pkg.version_compare(
                    modules_installed[module.project_name], version)
            else:
                # Installed and no version pinned: nothing to do.
                continue
            # BUGFIX: compare strings with != rather than `is not` — identity
            # comparison against a literal only worked by accident of string
            # interning (and is a SyntaxWarning on modern Python).
            if version_comparison == 0 and compare != ">":
                # module version installed is equal version
                continue
            else:
                # module version installed is smaller or bigger than version
                current_cmd = "upgrade"
        else:
            current_cmd = "install"
        if version and compare and ">" not in compare:
            run(current_cmd, module.project_name, version)
        else:
            if not version_comparison or version_comparison < 0:
                run(current_cmd, module.project_name)
def testSimpleRequirements(self):
    """Parsing with the legacy (name, specs, extras) Requirement constructor."""
    assert (list(parse_requirements("Twis-Ted>=1.2-1"))
            == [Requirement("Twis-Ted", [(">=", "1.2-1")], ())])
    # Continuation backslash plus a trailing comment folds into one spec.
    assert (list(parse_requirements("Twisted >=1.2, \ # more\n<2.0"))
            == [Requirement("Twisted", [(">=", "1.2"), ("<", "2.0")], ())])
    assert (Requirement.parse("FooBar==1.99a3")
            == Requirement("FooBar", [("==", "1.99a3")], ()))
    # Malformed requirement strings must raise ValueError.
    with pytest.raises(ValueError):
        Requirement.parse(">=2.3")
    with pytest.raises(ValueError):
        Requirement.parse("x\\")
    with pytest.raises(ValueError):
        Requirement.parse("x==2 q")
    with pytest.raises(ValueError):
        Requirement.parse("X==1\nY==2")
    with pytest.raises(ValueError):
        Requirement.parse("#")
def uninstall_eggs(reqs): """ Remove eggs matching the given requirements. """ # XXX This doesn't do dependencies? dists = [] names = [i.project_name for i in pkg_resources.parse_requirements(reqs)] for name in names: dist = [d for d in pkg_resources.working_set if d.project_name == name] if not dist: raise DistutilsOptionError('Cannot remove package not yet ' 'installed: %s' % name) dist = dist[0] if not dist.location.endswith('.egg'): raise DistutilsOptionError('Not an egg at %s, chickening out' % dist.location) if is_local(dist.location): dists.append(dist) else: log.info("Not uninstalling egg, it's not in our virtualenv: %s" % dist.location) for dist in dists: log.info("Removing %s (%s)" % (dist, dist.location)) shutil.rmtree(dist.location) dependency.remove_from_ws(pkg_resources.working_set, dist)
def uninstall_eggs(reqs): """ Remove eggs matching the given requirements. """ # XXX This doesn't do dependencies? dists = [] for i in pkg_resources.parse_requirements(reqs): dist = next((d for d in pkg_resources.working_set if d.project_name == i.project_name), None) if not dist: raise DistutilsOptionError('Cannot remove package, not installed', i.project_name) if not dist.location.endswith('.egg'): raise DistutilsOptionError('Not an egg at %s, chickening out' % dist.location) dists.append(dist) for dist in dists: if is_local(dist.location): log.info("Removing %s (%s)" % (dist, dist.location)) # Clear references to egg - http://trac.edgewall.org/ticket/7014 uncache_zipdir(dist.location) try: os.remove(dist.location) except OSError as ex: if ex.errno == errno.EISDIR: shutil.rmtree(dist.location) else: raise else: log.info("Not uninstalling egg, it's not in our virtualenv: %s", dist.location) dependency.remove_from_ws(pkg_resources.working_set, dist)
def get_deb_depends_from_setuptools_requires(requirements, on_failure="warn"):
    """
    Suppose you can't confidently figure out a .deb which satisfies a given
    requirement. If on_failure == 'warn', then log a warning. If on_failure
    == 'raise' then raise CantSatisfyRequirement exception. If on_failure ==
    'guess' then guess that python-$FOO will satisfy the dependency and that
    the Python version numbers will apply to the Debian packages (in addition
    to logging a warning message).

    Python 2 module (note the `except Exception, le` syntax below).
    """
    assert on_failure in ("raise", "warn", "guess"), on_failure

    import pkg_resources

    depends = [] # This will be the return value from this function.

    parsed_reqs=[]
    # Only unconditional (non-extra) requirement sections are considered.
    for extra,reqs in pkg_resources.split_sections(requirements):
        if extra: continue
        parsed_reqs.extend(pkg_resources.parse_requirements(reqs))

    if not parsed_reqs: return depends

    if not os.path.exists('/usr/bin/apt-file'):
        raise ValueError('apt-file not in /usr/bin. Please install '
                         'with: sudo apt-get install apt-file')

    # Ask apt-file for any packages which have a .egg-info file by
    # these names.

    # Note that apt-file appears to think that some packages
    # e.g. setuptools itself have "foo.egg-info/BLAH" files but not a
    # "foo.egg-info" directory.

    egginfore=("(/(%s)(?:-[^/]+)?(?:-py[0-9]\.[0-9.]+)?\.egg-info)"
               % '|'.join(req.project_name.replace('-', '_')
                          for req in parsed_reqs))

    args = ["apt-file", "search", "--ignore-case", "--regexp", egginfore]

    if 1:
        # do dry run on apt-file
        dry_run_args = args[:] + ['--dummy','--non-interactive']
        cmd = subprocess.Popen(dry_run_args,stderr=subprocess.PIPE)
        returncode = cmd.wait()
        if returncode:
            err_output = cmd.stderr.read()
            raise RuntimeError('Error running "apt-file search": ' +
                               err_output.strip())

    try:
        cmd = subprocess.Popen(args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    except Exception, le:
        # TODO: catch rc=1 and "E: The cache directory is empty. You need to
        # run 'apt-file update' first.", and tell the user to follow those
        # instructions.
        log.error('ERROR running: %s', ' '.join(args))
        raise RuntimeError('exception %s from subprocess %s' % (le,args))
def parse_requirements_file(self):
    """Yield (lowercased project name, specs) pairs from the fetched file."""
    contents = self.fetch_requirements()
    if not contents:
        return
    contents = strip_requirements(contents)
    if not contents:
        return
    for req in parse_requirements(contents):
        yield req.project_name.lower(), req.specs
def test_spaces_between_multiple_versions(self):
    """Whitespace around operators and commas is tolerated when parsing."""
    for spec in ('foo>=1.0, <3', 'foo >= 1.0, < 3'):
        req, = parse_requirements(spec)
#!/usr/bin/env python # -*- coding: utf-8 -*- import pathlib from pkg_resources import parse_requirements from setuptools import find_packages, setup # Repository root directory for use with reading files directory = pathlib.Path(__file__).parent # Load install_requires from requirements.txt. # https://stackoverflow.com/a/59971236/4651668 requirements = directory.joinpath('requirements.txt').read_text() requirements = [str(r) for r in parse_requirements(requirements)] version = '1.13.0' # Do not edit: this is maintained by bumpversion (see .bumpversion.cfg) setup( name='rotkehlchen', author='Rotki Solutions GmbH', author_email='*****@*****.**', description= 'Acccounting, asset management and tax report helper for cryptocurrencies', license='BSD-3', keywords= 'accounting tax-report portfolio asset-management cryptocurrencies', url='https://github.com/rotki/rotki', packages=find_packages('.'), package_data={ # TODO: Investigate if it's needed. rotkehlchen.spec is where files seem to be copied "rotkehlchen": ["data/*.json", "data/*.meta"],
# Note: To use the 'upload' functionality of this file, you must: # $ pipenv install twine --dev import io import os import sys from pkg_resources import parse_requirements from shutil import rmtree from setuptools import find_packages, setup, Command here = os.path.abspath(os.path.dirname(__file__)) # What packages are required for this module to be executed? with open(os.path.join(here, 'requirements.txt'), 'r') as f: REQUIRED = [ str(req) for req in parse_requirements(f) if req.name != 'python' ] print(REQUIRED) # What packages are optional? EXTRAS = {} # 'fancy feature': ['django'] extras = [ 'all', 'clustering', 'db_interaction', 'deep_learning', 'feature_encoding', 'time_series', 'webtools' ] # todo walk through folders and add the 'all' for extra in extras: rel_path = 'ml_toolkit/requirements.txt' if extra == 'all' else f'ml_toolkit/{extra}/requirements.txt' path = os.path.join(here, rel_path) with open(path, 'r') as f: EXTRAS[extra] = [ str(req) for req in parse_requirements(f)
def check_installation():
    """Utility function to check package installation.
    """
    import pkg_resources
    import os
    import sys
    import platform
    import subprocess

    log = ui.FancyPrinter()

    # Get system information
    log.line('System information')
    log.data(field='System', value=platform.system())
    log.data(field='Release', value=platform.release())
    log.data(field='Version', value=platform.version())
    log.data(field='Processor', value=platform.processor())
    log.line()

    # Get Python installation information
    log.line('Python information')
    log.data(field='Version', value=sys.version)
    log.data(field='Compiler', value=platform.python_compiler())
    log.data(field='Implementation', value=platform.python_implementation())
    log.line()

    # Get package information
    log.line('Package information')
    log.data(field='Name', value=__name__)
    log.data(field='Version', value=__version__)
    log.line()

    package = pkg_resources.require('dcase_util')[0]

    # Get core requirements
    core_requirements = package.requires()

    # Load requirements.txt
    requirements_filename = os.path.join(package.location, 'requirements.txt')
    with open(requirements_filename) as fp:
        requirements_file = fp.read()

    # Get all requirements
    all_requirements = []
    for r in pkg_resources.parse_requirements(requirements_file):
        if r.marker:
            raise ValueError("environment markers are not supported, in '%s'" % r)
        all_requirements.append(r)

    processed = []
    log.line('Core requirements')
    log.row('Package', 'Required', 'Installed', 'Status', widths=[25, 15, 15, 15])
    log.row_sep()
    for requirement in core_requirements:
        if requirement.key not in processed:
            # 'CHECK' flags an installed version outside the required range.
            log.row(
                requirement.key,
                ''.join(requirement.specs[0]),
                pkg_resources.get_distribution(requirement.key).version,
                'OK' if requirement.__contains__(
                    pkg_resources.get_distribution(
                        requirement.key)) else 'CHECK')
            processed.append(requirement.key)
    log.line()

    # Remaining requirements.txt entries not already shown above.
    log.line('Extra requirements')
    log.row('Package', 'Required', 'Installed', 'Status', widths=[25, 15, 15, 15])
    log.row_sep()
    for requirement in all_requirements:
        if requirement.key not in processed:
            log.row(
                requirement.key,
                ''.join(requirement.specs[0]),
                pkg_resources.get_distribution(requirement.key).version,
                'OK' if requirement.__contains__(
                    pkg_resources.get_distribution(
                        requirement.key)) else 'CHECK')
            processed.append(requirement.key)
    log.line()

    # Get system level requirements
    log.line('System')
    ffmpeg_info = subprocess.check_output(['ffmpeg', '-version']).decode('utf-8')
    log.data(field='FFMPEG', value=ffmpeg_info)
def parse_requirements(path: str) -> 'list[str]':
    """Read the requirements file at `path` and return its specs as strings."""
    with pathlib.Path(path).open() as requirements:
        # Materialise inside the `with` block: the parser reads lazily.
        return [str(req) for req in pkg_resources.parse_requirements(requirements)]
def reqstr2obj(reqstr):
    """Parse a single requirement string into a Requirement object."""
    parsed = list(parse_requirements([reqstr]))
    return parsed[0]
def parse_requirements(requirements):
    """Materialise pkg_resources' lazy requirement parser into a list."""
    parsed = pkg_resources.parse_requirements(requirements)
    return list(parsed)
def _parse_working_set(entries):
    """Yield an _Entry wrapper for each requirement parsed from `entries`."""
    yield from (_Entry(req) for req in parse_requirements(entries))
# See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup from setuptools.command.test import test as TestCommand import os import sys import pkg_resources __version__ = '0.17.2' PACKAGE_NAME = 'ibm_platform_services' PACKAGE_DESC = 'Python client library for IBM Cloud Platform Services' with open('requirements.txt') as f: install_requires = [ str(req) for req in pkg_resources.parse_requirements(f) ] with open('requirements-dev.txt') as f: tests_require = [str(req) for req in pkg_resources.parse_requirements(f)] if sys.argv[-1] == 'publish': # test server os.system('python setup.py register -r pypitest') os.system('python setup.py sdist upload -r pypitest') # production server os.system('python setup.py register -r pypi') os.system('python setup.py sdist upload -r pypi') sys.exit()
def check_requirements(
        extras, github_annotate):  # pylint disable: too-many-locals-too-many-branches
    """Check the 'requirements/*.txt' files.

    Checks that the environments specified in the requirements files
    match all the dependencies specified in 'setup.json'.

    The arguments allow to specify which 'extra' requirements to expect.
    Use 'DEFAULT' to select 'atomic_tools', 'docs', 'notebook', 'rest',
    and 'tests'.
    """
    if len(extras) == 1 and extras[0] == 'DEFAULT':
        extras = ['atomic_tools', 'docs', 'notebook', 'rest', 'tests']

    # Read the requirements from 'setup.json'
    setup_cfg = _load_setup_cfg()
    install_requires = setup_cfg['install_requires']
    for extra in extras:
        install_requires.extend(setup_cfg['extras_require'][extra])
    install_requires = set(parse_requirements(install_requires))

    not_installed = defaultdict(list)
    for fn_req in (ROOT / 'requirements').iterdir():
        # Only files named like '...-py-<version>.txt' are checked.
        match = re.match(r'.*-py-(.*)\.txt', str(fn_req))
        if not match:
            continue
        env = {'python_version': match.groups()[0]}
        # Skip requirements whose marker is false for this python version.
        required = {
            r for r in install_requires
            if r.marker is None or r.marker.evaluate(env)
        }

        with open(fn_req) as req_file:
            working_set = list(_parse_working_set(req_file))
            installed = {
                req for req in required for entry in working_set
                if entry.fulfills(req)
            }

        for dependency in required.difference(installed):
            not_installed[dependency].append(fn_req)

    if any(not_installed.values()):
        setup_json_linenos = _find_linenos_of_requirements_in_setup_json(
            not_installed)

        # Format error message to be presented to user.
        error_msg = [
            "The requirements/ files are missing dependencies specified in the 'setup.json' file.",
            ''
        ]
        for dependency, fn_reqs in not_installed.items():
            src = 'setup.json:' + ','.join(
                str(lineno + 1) for lineno in setup_json_linenos[dependency])
            error_msg.append(
                f'{src}: No match for dependency `{dependency}` in:')
            for fn_req in sorted(fn_reqs):
                error_msg.append(f' - {fn_req.relative_to(ROOT)}')

        if GITHUB_ACTIONS:
            # Set the step output error message which can be used, e.g., for
            # display as part of an issue comment.
            print('::set-output name=error::' + '%0A'.join(error_msg))

        if GITHUB_ACTIONS and github_annotate:
            # Annotate the setup.json file with specific warnings.
            for dependency, fn_reqs in not_installed.items():
                for lineno in setup_json_linenos[dependency]:
                    print(f'::warning file=setup.json,line={lineno+1}::'
                          f"No match for dependency '{dependency}' in: " +
                          ','.join(
                              str(fn_req.relative_to(ROOT))
                              for fn_req in fn_reqs))

        raise DependencySpecificationError('\n'.join(error_msg))

    click.secho(
        "Requirements files appear to be in sync with specifications in 'setup.json'.",
        fg='green')
from setuptools import setup DIR_PATH = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(DIR_PATH, 'README.md'), encoding='utf-8') as file: long_description = file.read() about = {} with open(os.path.join(DIR_PATH, 'flake8_typo', '__about__.py'), 'r', encoding='utf-8') as file: exec(file.read(), about) with open(os.path.join(DIR_PATH, 'requirements.txt'), encoding='utf-8') as file: requirements = [str(req) for req in parse_requirements(file.read())] with open(os.path.join(DIR_PATH, 'requirements-dev.txt'), encoding='utf-8') as file: requirements_dev = [str(req) for req in parse_requirements(file.read())] setup( name=about['__title__'], description=about['__description__'], long_description=long_description, long_description_content_type='text/markdown', version=about['__version__'], author=about['__author__'], author_email=about['__author_email__'], license=about['__license__'], keywords=about['__keywords__'],
def check_requirements(file='requirements.txt'):
    """Check that installed dependencies satisfy the specs in `file`.

    Raises DistributionNotFound or VersionConflict if requirements are not
    met.
    """
    import pkg_resources
    requirements = pkg_resources.parse_requirements(Path(file).open())
    # BUGFIX: the previous ''.join(*x.specs) raised TypeError for any
    # requirement with more than one specifier (e.g. 'foo>=1,<2').
    # x.specifier renders all specifiers correctly.
    requirements = [f'{x.name}{x.specifier}' for x in requirements]
    pkg_resources.require(requirements)  # DistributionNotFound or VersionConflict if unmet
self.announce(f'Cleaning: {path}', level=log.INFO) shutil.rmtree(path) clean.run(self) def get_description(desc_file_path): """read description from README.md""" with open(desc_file_path, 'r', encoding='utf-8') as fstream: description = fstream.read() return description with (SCRIPT_DIR / 'requirements.txt').open() as requirements: install_reqs = [ str(requirement) for requirement in pkg_resources.parse_requirements(requirements) ] setup( name='openvino-dev', version=os.getenv('OPENVINO_VERSION', '0.0.0'), author='Intel® Corporation', license='OSI Approved :: Apache Software License', author_email='*****@*****.**', url='https://docs.openvino.ai/latest/index.html', download_url='https://github.com/openvinotoolkit/openvino/tags', description='OpenVINO(TM) Development Tools', long_description=get_description( SCRIPT_DIR.parents[1] / 'docs/install_guides/pypi-openvino-dev.md'), long_description_content_type='text/markdown', classifiers=[
#!/usr/bin/python # EASY-INSTALL-ENTRY-SCRIPT: 'docker-compose==1.8.0','console_scripts','docker-compose' # __requires__ = 'docker-compose==1.8.0' import pkg_resources import psutil # def ss(ss:pkg_resources.EntryPoint): # ss. # pass if __name__ == '__main__': # print(psutil.users()) pa = psutil.Process() psdir = dir(psutil) # print(psutil.users()) requ = pkg_resources.parse_requirements(psutil) print(type(requ)) requ() # requ. # for i in requ: # print(i) info = pkg_resources.get_entry_info('docker-compose==1.8.0', 'console_scripts', 'docker-compose') # info1=pkg_resources.get_entry_info('psutil==5.4.8', 'console_scripts', 'users') # print(info.name) # print(info.attrs) # print(info.dist) # print(info.extras) # ss(info) # sys.exit( # load_entry_point('docker-compose==1.8.0', 'console_scripts', 'docker-compose')()
def testEmptyParse(self):
    """An empty requirements string must yield no Requirement objects."""
    parsed = parse_requirements('')
    assert list(parsed) == []
def _check_extra(extra, reqs):
    """Validate one extras_require entry: 'name' or 'name:marker' plus its reqs.

    Raises DistutilsSetupError for an invalid environment marker; invalid
    requirement specs raise from parse_requirements itself.
    """
    name, _, marker = extra.partition(':')
    if marker and pkg_resources.invalid_marker(marker):
        raise DistutilsSetupError("Invalid environment marker: " + marker)
    # Drain the generator so malformed requirement strings fail right here.
    list(pkg_resources.parse_requirements(reqs))
def check_requirements(file='requirements.txt', exclude=()):
    """Verify installed packages satisfy the specs in *file*.

    Names listed in *exclude* are skipped.  Raises DistributionNotFound or
    VersionConflict (from pkg_resources.require) when a requirement is unmet.
    """
    import pkg_resources

    wanted = []
    for req in pkg_resources.parse_requirements(Path(file).open()):
        if req.name in exclude:
            continue
        wanted.append(f'{req.name}{req.specifier}')
    pkg_resources.require(wanted)
def test_local_version(self):
    # A legacy local-version-style suffix (".org1") must still parse into
    # exactly one Requirement (the unpacking asserts the count).
    # NOTE(review): this test looks truncated -- any follow-up assertions on
    # `req` are outside this chunk.
    req, = parse_requirements('foo==1.0.org1')
def checkRequires(self, dist, txt, extras=()):
    """Assert that dist.requires(extras) equals the requirements parsed from txt."""
    actual = list(dist.requires(extras))
    expected = list(parse_requirements(txt))
    assert actual == expected
def test_environment_marker_evaluation_negative(self):
    """Environment markers are evaluated at resolution time."""
    env = pkg_resources.Environment([])
    working_set = WorkingSet([])
    # python_version < '2' is false on any supported interpreter, so the
    # marker filters the requirement out and nothing gets resolved.
    reqs = parse_requirements("Foo;python_version<'2'")
    resolved = working_set.resolve(reqs, env)
    assert list(resolved) == []
# Packaging options shared by every build configuration of this setup script.
extra_options['zip_safe'] = False
extra_options['python_requires'] = (
    # NOTE: keep in sync with Trove classifier list below.
    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*')
# (Removed a commented-out duplicate of the python_requires assignment above.)
try:
    import pkg_resources
except ImportError:
    # setuptools unavailable: silently skip the optional 'source' extra.
    pass
else:
    # Context manager replaces the original open()/try/finally pair; the file
    # is closed even if parse_requirements raises on a malformed line.
    with open("requirements.txt", "r") as f:
        deps = [str(req) for req in pkg_resources.parse_requirements(f)]
    extra_options['extras_require'] = {
        'source': deps,
    }
extra_options['package_data'] = {
    'pyxpdf.includes': [
        '*.pxd', '*.h'
    ],
}
extra_options['package_dir'] = {
    '': 'src'
}
def _get_requirements():
    """Return requirement strings read from requirements.txt next to this file."""
    req_path = Path(__file__).parent / 'requirements.txt'
    with req_path.open() as stream:
        return [str(req) for req in parse_requirements(stream)]
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function from os.path import dirname, join, realpath from pkg_resources import parse_requirements from setuptools import find_packages, setup _install_requires = parse_requirements(open("requirements.in")) install_requires = [str(req) for req in _install_requires] ROOT = realpath(join(dirname(__file__))) setup( name="autorelease", version="0.1.0", packages=find_packages(), author="Mouadh Kaabachi", author_email="*****@*****.**", description="auto release workflow", url="https://github.com", long_description=open("README.rst").read(), install_requires=install_requires, include_package_data=True, classifiers=[ "Programming Language :: Python", "Development Status :: 3 - Alpha", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3.6", ],
# General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Generate minimal requirements from setup.py.""" from __future__ import print_function import mock import pkg_resources import setuptools import sys if __name__ == '__main__': with mock.patch.object(setuptools, 'setup') as mock_setup: import setup # pylint: disable=F401 # called arguments are in `mock_setup.call_args` args, kwargs = mock_setup.call_args install_requires = kwargs.get('install_requires', []) for pkg in pkg_resources.parse_requirements(install_requires): if len(pkg.specs): if pkg.specs[0][0] == '>=': print("{0.project_name}=={0.specs[0][1]}".format(pkg)) elif pkg.specs[0][0] == '>': print("{0.project_name} specify exact minimal version using " "'>=' instead of '>'.".format(pkg), file=sys.stderr)
from pathlib import Path

from pkg_resources import parse_requirements
from setuptools import find_packages, setup

# NOTE(review): each iteration rebinds install_requires, so only the LAST
# requirements.txt found by rglob survives -- if several exist this silently
# drops all but one; confirm that is intended.
for path in Path('./').rglob('requirements.txt'):
    with Path(path).open() as requirements_txt:
        install_requires = [
            str(requirement)
            for requirement
            in parse_requirements(requirements_txt)
        ]

# NOTE(review): truncated -- the classifiers list and the setup(**config)
# call continue past this chunk.  The url value contains spaces
# ("https: // www.shipyardapp.com"); left untouched here as it is runtime data.
config = {
    "description": "Simplified data pipeline blueprints for working with SFTP.",
    "author": "Shipyard Team",
    "url": "https: // www.shipyardapp.com",
    "author_email": "*****@*****.**",
    "packages": find_packages(),
    "install_requires": install_requires,
    "name": "sftp-blueprints",
    "version": "v0.1.0",
    "license": "Apache-2.0",
    "classifiers": [
def get_conditional_requirements(self):
    """Parse conditional-requirements.txt (next to this module) and append
    each parsed Requirement to self.conditional_reqs.
    """
    crfile = join(dirname(__file__), 'conditional-requirements.txt')
    # BUG FIX: the original open() leaked the file handle; a context manager
    # guarantees it is closed even if parsing raises.
    with open(crfile) as fh:
        for req in pkg_resources.parse_requirements(fh.readlines()):
            self.conditional_reqs.append(req)
def testEmptyParse(self):
    """Parsing an empty requirements string produces an empty list."""
    parsed = list(parse_requirements(''))
    self.assertEqual(parsed, [])
def checkRequires(self, dist, txt, extras=()):
    """Assert dist.requires(extras) matches the requirements parsed from txt."""
    expected = list(parse_requirements(txt))
    self.assertEqual(list(dist.requires(extras)), expected)
def canonicalizeExtras(myExtras):
    """Return a copy of the extras mapping with each requirement-spec value
    parsed into a list of Requirement objects.
    """
    return {
        name: list(parse_requirements(spec))
        for name, spec in myExtras.items()
    }
def _parse_content(fh):
    """Parse the content of a script to find marked dependencies.

    Scans each line for a trailing ``# fades`` / ``# fades.pypi`` comment and
    builds a dict mapping repo constant -> list of parsed Requirement objects.
    """
    content = iter(fh)
    deps = {}
    for line in content:
        # quickly discard most of the lines
        if 'fades' not in line:
            continue

        # discard other string with 'fades' that isn't a comment
        if '#' not in line:
            continue

        # assure that it's a well commented line and no other stuff
        line = line.strip()
        import_part, fades_part = line.rsplit("#", 1)

        # discard other comments in the same line that aren't for fades
        if "fades" not in fades_part:
            import_part, fades_part = import_part.rsplit("#", 1)

        fades_part = fades_part.strip()
        if not fades_part.startswith("fades"):
            continue

        if not import_part:
            # the fades comment was done at the beginning of the line,
            # which means that the import info is in the next one
            import_part = next(content).strip()

        # the "import" line itself is commented out -> nothing to record
        if import_part.startswith('#'):
            continue

        # get module
        import_tokens = import_part.split()
        if import_tokens[0] == 'import':
            module_path = import_tokens[1]
        elif import_tokens[0] == 'from' and import_tokens[2] == 'import':
            module_path = import_tokens[1]
        else:
            logger.warning("Not understood import info: %s", import_tokens)
            continue
        # only the top-level package matters for the dependency name
        module = module_path.split(".")[0]
        # If fades know the real name of the pkg. Replace it!
        if module in PKG_NAMES_DB:
            module = PKG_NAMES_DB[module]

        # To match the "safe" name that pkg_resources creates:
        module = module.replace('_', '-')

        # get the fades info
        if fades_part.startswith("fades.pypi"):
            repo = REPO_PYPI
            marked = fades_part[10:].strip()
        # NOTE(review): this string literal was broken across a line boundary
        # in the chunk; reconstructed here as "<>=! \n" (space/comparison/newline
        # after the bare 'fades' token) -- confirm against upstream fades source.
        elif fades_part.startswith("fades") and (len(fades_part) == 5 or
                                                 fades_part[5] in "<>=! \n"):
            # starts with 'fades' only, and continues with a space or a
            # comparison, not a dot, neither other word stuck together
            repo = REPO_PYPI
            marked = fades_part[5:].strip()
        else:
            logger.warning("Not understood fades info: %r", fades_part)
            continue

        if not marked:
            # nothing after the pypi token
            requirement = module
        elif marked[0] in "<>=!":
            # the rest is just the version
            requirement = module + ' ' + marked
        else:
            # the rest involves not only a version, but also the project name
            requirement = marked

        # record the dependency
        dependency = list(parse_requirements(requirement))[0]
        deps.setdefault(repo, []).append(dependency)

    return deps