def get_reqs():
    """Parse requirements.txt and return its entries as a list of strings."""
    try:
        # pip >= 10 keeps the parser under the private _internal package
        from pip._internal.req import parse_requirements
    except ImportError:
        # pip <= 9.0.3 exposed it publicly
        from pip.req import parse_requirements
    try:
        parsed = parse_requirements('requirements.txt', session=False)
    except TypeError:
        # very old pip: no ``session`` keyword at all
        parsed = parse_requirements('requirements.txt')
    return [str(item.req) for item in parsed if item.req]
def _parse_requirements(filepath):
    """Read *filepath* with pip's requirements parser, picking the import
    location that matches the installed pip version, and return the
    requirement strings."""
    # Major/minor of the installed pip, e.g. "18.1" -> [18, 1].
    version_text = pkg_resources.get_distribution('pip').version
    major_minor = [int(part) for part in version_text.split('.')[:2]]
    if major_minor >= [10, 0]:
        # pip >= 10: parser moved under the private _internal package
        from pip._internal.download import PipSession
        from pip._internal.req import parse_requirements
        parsed = parse_requirements(filepath, session=PipSession())
    elif major_minor >= [6, 0]:
        # pip 6..9: public pip.req / pip.download modules
        from pip.download import PipSession
        from pip.req import parse_requirements
        parsed = parse_requirements(filepath, session=PipSession())
    else:
        # pip < 6 takes no session argument
        from pip.req import parse_requirements
        parsed = parse_requirements(filepath)
    return [str(entry.req) for entry in parsed]
def parse_reqs(reqs_file):
    """Parse *reqs_file* with pip and return its requirement strings."""
    # Minimal options object: pip's parser only reads these two attributes.
    options = Option("--workaround")
    options.skip_requirements_regex = None
    options.isolated_mode = True
    return [
        str(item.req)
        for item in parse_requirements(reqs_file, options=options,
                                       session=PipSession())
    ]
def get_required():
    """Collect requirement objects from both requirements files.

    Note: returns pip requirement objects (``.req``), not strings, exactly
    as the callers of this helper expect.
    """
    sources = ('requirements.txt', 'requirements-no-deps.txt')
    return [
        entry.req
        for filename in sources
        for entry in parse_requirements(filename, session='hack')
    ]
def test_every_pip_requirement_has_matching_version_in_debian_package(self):
    """Test pip requirements versions match debian package dependencies."""
    for pip_req in parse_requirements(self.requirements_file, session="unittest"):
        # pip may also yield option lines etc.; only real requirements matter.
        if isinstance(pip_req, InstallRequirement):
            # Map the pip name to its debian package name: explicit override
            # first, otherwise the conventional "python3-<name>".
            if pip_req.name in self.dpkg_name:
                dpkg_name = self.dpkg_name[pip_req.name]
            else:
                dpkg_name = "python3-{}".format(pip_req.name)
            # Only version-pinned requirements are checked.
            if pip_req.req.specifier:
                pip_req_version = str(pip_req.req.specifier)
                # Candidate "name<op>version" strings from debian/control.
                debian_package_dependencies = [
                    pip_req.name+x for x in self.faucet_dpkg_deps[dpkg_name]
                ]
                if str(pip_req_version).startswith('=='):
                    # debian/control is annoying about how it handles exact
                    # versions, calculate the debian equivalent of the
                    # pip requirements match and compare that
                    # (pip "==X.Y" becomes debian ">=X.Y" plus "<<X.(Y+1)").
                    lower_match = pip_req_version.replace('==', '>=')
                    upper_match = pip_req_version.replace('==', '<<').split('.')
                    # Bump the last version component for the exclusive bound.
                    upper_match[-1] = str(int(upper_match[-1]) + 1)
                    upper_match = '.'.join(upper_match)
                    self.assertIn(pip_req.name+lower_match, debian_package_dependencies)
                    self.assertIn(pip_req.name+upper_match, debian_package_dependencies)
                else:
                    # Range specifiers must appear verbatim in debian/control.
                    self.assertIn(pip_req.name+pip_req_version, debian_package_dependencies)
def get_requirements(requirements):
    """Return an array of requirements read from *requirements* (a filename
    relative to this file's directory)."""
    base_dir = os.path.realpath(os.path.dirname(__file__))
    req_path = os.path.join(base_dir, requirements)
    parsed = parse_requirements(req_path, session=PipSession())
    return [str(entry.req) for entry in parsed]
def get_test_req():
    """Return the test-time requirement strings."""
    req_path = os.path.join(ROOT_DIR, 'requirements-test.txt')
    requires = [str(item.req)
                for item in parse_requirements(req_path, session=False)]
    # gnureadline is skipped on FreeBSD, where it does not build.
    if not sys.platform.startswith('freebsd'):
        requires.append('gnureadline==6.3.3')
    return requires
def parse_reqs(req_files, links=False):
    """returns a list of requirements from a list of req files

    With ``links=True`` only VCS/URL requirements are returned (with the
    ``git+`` prefix stripped); otherwise the plain requirement strings.
    Duplicates across files are collapsed via a set.
    """
    collected = set()
    session = PipSession()
    for path in req_files:
        # parse_requirements() yields pip InstallRequirement objects
        for item in parse_requirements(path, session=session):
            if links:
                if item.link:
                    collected.add(item.link.url.replace('git+', ''))
            else:
                collected.add(str(item.req))
    return list(collected)
def _read_requirements(
    filename: str="requirements.txt"
) -> typing.Dict[str, typing.List[str]]:
    """Read *filename* and split it into install_requires / dependency_links."""
    entries = list(parse_requirements(filename, session="libioc"))
    install = [f"{entry.name}{entry.specifier}" for entry in entries]
    links = [str(entry.link) for entry in entries if entry.link]
    return {
        "install_requires": install,
        "dependency_links": links,
    }
def _requirements(self):
    """Populate ``self.requirements`` with lower-cased module names read
    from <target_directory>/requirements.txt (empty list when missing)."""
    path = os.path.join(self.target_directory, 'requirements.txt')
    logger.debug(path)
    if not os.path.isfile(path):
        logger.debug('requirements.txt not found!')
        self.requirements = []
        return
    parsed = parse_requirements(path, session=False)
    self.requirements = [entry.name.strip().lower() for entry in parsed]
    logger.debug('requirements modules count: {count} ({modules})'.format(
        count=len(self.requirements), modules=','.join(self.requirements)))
def get_requirements(requirements_file):
    """Use pip to parse requirements file."""
    if not path.isfile(requirements_file):
        return []
    # Honour environment markers, such as:
    #   rope_py3k ; python_version >= '3.0'
    return [
        str(req.req)
        for req in parse_requirements(requirements_file, session="hack")
        if req.match_markers()
    ]
def test_every_pip_requirement_in_debian_package(self):
    """Test pip requirements are listed as dependencies on debian package."""
    for entry in parse_requirements(self.requirements_file, session="unittest"):
        # Skip option lines etc. — only real requirements are checked.
        if not isinstance(entry, InstallRequirement):
            continue
        # Explicit name mapping wins; otherwise assume python3-<name>.
        if entry.name in self.dpkg_name:
            dpkg_name = self.dpkg_name[entry.name]
        else:
            dpkg_name = "python3-{}".format(entry.name)
        self.assertIn(dpkg_name, self.faucet_dpkg_deps)
def find_python_pip(self, file_path):
    """Scan candidate paths for Python requirement manifests, recording each
    module and its version specifier; node manifests are delegated to the
    npm scanner."""
    for candidate in file_path:
        if 'requirements.txt' in candidate:
            for entry in parse_requirements(filename=candidate, session=False):
                name = entry.name
                spec = entry.specifier
                self._framework.append(name)
                record = {
                    name: {
                        'version': str(spec),
                        'format': 'python',
                    }
                }
                self._result.update(record)
        elif 'package.json' in candidate:
            # Node dependency manifest — handled by the npm path.
            self.find_nodejs_npm([candidate])
from setuptools import find_packages, setup import setuptools import io try: from pip._internal.req import parse_requirements except ImportError: from pip.req import parse_requirements # Read in the README for the long description on PyPI with io.open('README.md', 'r', encoding='utf-8') as f: readme = f.read() reqs = parse_requirements("requirements.txt", session=False) install_requires = [str(ir.req) for ir in reqs] setup( name='mphyspy', version='0.2.3', description='Python3 library for calculating college-level modern physics', long_description=readme, long_description_content_type="text/markdown", url='https://github.com/eunchan1001/mphyspy.git', author='Eunchan Cho', author_email='*****@*****.**', license='MIT', install_requires=install_requires, packages=find_packages(), classifiers=[ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent",
# NOTE(review): pip.download / pip.req only exist on pip <= 9.0.3 — confirm
# the build environment pins an old pip, or these imports will fail.
from pip.download import PipSession
from pip.req import parse_requirements

# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="mans_to_es",
    version="1.4",
    author="LDO-CERT",
    author_email="*****@*****.**",
    description="Send .mans to ElasticSearch",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='Apache License, Version 2.0',
    url="https://github.com/LDO-CERT/mans_to_es",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    entry_points={
        'console_scripts': ['mans_to_es=mans_to_es.mans_to_es:main']
    },
    # Install requirements are read from requirements.txt via pip's parser.
    install_requires=[
        str(req.req)
        for req in parse_requirements(
            'requirements.txt',
            session=PipSession(),
        )
    ],
)
''' python3 setup.py sdist twine upload dist/* ''' from distutils.core import setup try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements install_requires = [ str(ir.req) for ir in parse_requirements('requirements.txt', session=False) ] setup( name='safrs', packages=['safrs'], version='2.2.1', license='MIT', description='safrs : SqlAlchemy Flask-Restful Swagger2', long_description=open('README.rst').read(), author='Thomas Pollet', author_email='*****@*****.**', url='https://github.com/thomaxxl/safrs', download_url='https://github.com/thomaxxl/safrs/archive/1.2.1.tar.gz', keywords=['SqlAlchemy', 'Flask', 'REST', 'Swagger', 'JsonAPI', 'OpenAPI'], python_requires='>=3.0, !=3.0.*, !=3.1.*, !=3.2.*, <4', install_requires=install_requires, classifiers=[ 'Development Status :: 3 - Alpha',
def setup(path, ext_modules=None):
    """Drive the package build: read project metadata from the config file,
    collect packages, data files, scripts and requirements, then hand
    everything to ``core.setup``.

    ``path`` is the project root (with trailing separator); ``ext_modules``
    is an optional list of C extensions.
    """
    ext_modules = ext_modules or []
    config = get_config()
    package_root = config.get_value('package_root')
    # Guess environment
    if "--development" in argv:
        environment = 'development'
        argv.remove("--development")
    else:
        environment = 'production'
    # Build
    # Only build-like commands trigger a fresh build/version computation.
    if any(x in argv for x in ('build', 'install', 'sdist', 'egg_info')):
        version = build(path, config, environment)
    else:
        version = get_package_version(package_root)
    # Initialize variables
    package_name = config.get_value('package_name')
    if package_name is None:
        package_name = config.get_value('name')
    packages = [package_name]
    package_data = {package_name: []}
    # The sub-packages
    if config.has_value('packages'):
        subpackages = config.get_value('packages')
        for subpackage_name in subpackages:
            packages.append('%s.%s' % (package_name, subpackage_name))
    else:
        subpackages = []
    # Python files are included by default
    filenames = [ x.strip() for x in open(path + 'MANIFEST').readlines() ]
    filenames = [ x for x in filenames if not x.endswith('.py') ]
    # The data files
    prefix = '' if package_root == '.' else package_root + '/'
    prefix_n = len(prefix)
    for line in filenames:
        if not line.startswith(prefix):
            continue
        line = line[prefix_n:]
        # NOTE(review): this rebinds the ``path`` parameter inside the loop;
        # harmless here (MANIFEST was already read) but confusing — consider
        # renaming if this function is ever touched again.
        path = line.split('/')
        if path[0] in subpackages:
            subpackage = '%s.%s' % (package_name, path[0])
            files = package_data.setdefault(subpackage, [])
            files.append(join_path(*path[1:]))
        elif path[0] not in ('archive', 'docs', 'scripts', 'test'):
            package_data[package_name].append(line)
    # The scripts
    if config.has_value('scripts'):
        scripts = config.get_value('scripts')
        scripts = [ join_path(*['scripts', x]) for x in scripts ]
    else:
        scripts = []
    # Long description
    if exists('README.rst'):
        with codecs.open('README.rst', 'r', 'utf-8') as readme:
            long_description = readme.read()
    else:
        long_description = config.get_value('description')
    author_name = config.get_value('author_name')
    # Requires
    install_requires = []
    if exists('requirements.txt'):
        install_requires = parse_requirements(
            'requirements.txt', session=PipSession())
        install_requires = [str(ir.req) for ir in install_requires]
    # XXX Workaround buggy distutils ("sdist" don't likes unicode strings,
    # and "register" don't likes normal strings).
    # NOTE(review): ``unicode`` is Python-2-only — this branch breaks on
    # Python 3; confirm which interpreter this file targets.
    if 'register' in argv:
        author_name = unicode(author_name, 'utf-8')
    classifiers = [ x for x in config.get_value('classifiers') if x ]
    core.setup(name = package_name,
               version = version,
               # Metadata
               author = author_name,
               author_email = config.get_value('author_email'),
               license = config.get_value('license'),
               url = config.get_value('url'),
               description = config.get_value('title'),
               long_description = long_description,
               classifiers = classifiers,
               # Packages
               package_dir = {package_name: package_root},
               packages = packages,
               package_data = package_data,
               # Requires / Provides
               install_requires=install_requires,
               # Scripts
               scripts = scripts,
               cmdclass = {'build_ext': OptionalBuildExt},
               # C extensions
               ext_modules=ext_modules)
from setuptools import setup, find_packages # https://stackoverflow.com/a/49867265/1869821 try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements VERSION = '1.0.1' INSTALL_REQUIRES = [ str(r.req) for r in parse_requirements('requirements.txt', session=False) ] setup( name="judge-pics", description="Database of Judge Pictures", version=VERSION, author="Mike Lissner", author_email="*****@*****.**", maintainer="Mike Lisser", maintainer_email="*****@*****.**", packages=find_packages(exclude=('tests',)), include_package_data=True, package_data={ 'judge_pics': [ 'data/128/*', 'data/256/*', 'data/512/*', 'data/orig/*', 'data/judges.json',
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages import uuid try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements readme = open('README.rst').read() history = open('HISTORY.rst').read().replace('.. :changelog:', '') requirements = parse_requirements("requirements.txt", session=uuid.uuid1()) entry_points = { 'console_scripts': [ 'nectar-metrics-nova = nectar_metrics.nova:main', 'nectar-metrics-cinder = nectar_metrics.cinder:main', 'nectar-metrics-rcshibboleth = nectar_metrics.rcshibboleth:main', 'nectar-metrics-whisper = nectar_metrics.whisper:main', ], 'ceilometer.poll.central': [ 'nectar.instances = nectar_metrics.ceilometer.compute.pollster:ComputePollster', # noqa 'nectar.volumes = nectar_metrics.ceilometer.volume.cinder:CinderPollster', # noqa 'nectar.cinder_pools = nectar_metrics.ceilometer.volume.cinder:CinderPoolPollster', # noqa 'nectar.allocations.status = nectar_metrics.ceilometer.allocation.pollster:AllocationStatusPollster', # noqa 'nectar.allocations.novaquota = nectar_metrics.ceilometer.allocation.pollster:NovaQuotaAllocationPollster', # noqa 'nectar.allocations.cinderquota = nectar_metrics.ceilometer.allocation.pollster:CinderQuotaAllocationPollster', # noqa 'nectar.allocations.swiftquota = nectar_metrics.ceilometer.allocation.pollster:SwiftQuotaAllocationPollster', # noqa
def open_required_pip(file_name):
    """Parse *file_name* with pip's requirements parser and return its
    entries as requirement strings.

    Cleanup: removed the dead commented-out ``open_required`` fallback and
    the redundant intermediate variable; behavior is unchanged.
    """
    install_requirements = parse_requirements(file_name, session='hack')
    return [str(ir.req) for ir in install_requirements]
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try:
    # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:
    # for pip <= 9.0.3
    from pip.req import parse_requirements
import re, ast

# get version from __version__ variable in tandemarbor/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('tandemarbor/__init__.py', 'rb') as f:
    version = str(
        ast.literal_eval(
            _version_re.search(f.read().decode('utf-8')).group(1)))

# BUGFIX: parse_requirements() returns a generator on modern pip, and the
# original code iterated it twice (install_requires, then dependency_links),
# so the second comprehension always came up empty. Materialize it once into
# a list, which can be iterated any number of times.
requirements = list(parse_requirements("requirements.txt", session=""))

setup(name='tandemarbor',
      version=version,
      description='Tander Arbor ERPNext Customization',
      author='*****@*****.**',
      author_email='*****@*****.**',
      packages=find_packages(),
      zip_safe=False,
      include_package_data=True,
      install_requires=[str(ir.req) for ir in requirements],
      # NOTE(review): ``_link`` is a private pip attribute — fragile across
      # pip versions; confirm it still exists in the pinned pip.
      dependency_links=[str(ir._link) for ir in requirements if ir._link])
from pip._internal.network.session import PipSession elif pip_major_version >= 10: from pip._internal.download import PipSession from pip._internal.req import parse_requirements else: # for pip <= 9.0.3 from pip.download import PipSession from pip.req import parse_requirements with open("README.md", "r") as fh: long_description = fh.read() if (pip_major_version == 20 and pip_minor_version >= 1) or pip_major_version > 20: install_requires = [ str(req.requirement) for req in parse_requirements( "requirements.txt", session=PipSession(), ) ] else: try: install_requires = [ str(req.req) for req in parse_requirements( "requirements.txt", session=PipSession(), ) ] except: install_requires = [ str(req) for req in parse_requirements( "requirements.txt", session=PipSession(),
from pip.req import parse_requirements # try: # from pip.req import parse_requirements # except ImportError: # sys.exit('ERROR: pip is required.\n') if os.environ.get('READTHEDOCS', None): # Set empty install_requires to get install to work on readthedocs install_requires = [] else: if sys.version_info[0] > 2: req_file = 'requirements.txt' else: req_file = 'requirements2.txt' try: reqs = parse_requirements(req_file, session=False) except TypeError: reqs = parse_requirements(req_file) install_requires = [str(r.req) for r in reqs] # read version exec(open('abutils/version.py').read()) config = { 'description': 'Utilities for analysis of antibody NGS data', 'author': 'Bryan Briney', 'url': 'https://www.github.com/briney/abutils', 'author_email': '*****@*****.**', 'version': __version__, 'install_requires': install_requires, 'packages': ['abutils'],
# Setup.py allows audius-discovery-provider as a redistributable package # Currently, the repository is not configured as such but may be moving forward # https://caremad.io/posts/2013/07/setup-vs-requirement/ import uuid from setuptools import setup, find_packages from pip._internal.req import parse_requirements install_reqs = parse_requirements("requirements.txt", session=uuid.uuid1()) requirements = [str(ir.req) for ir in install_reqs] config = { "description": "Audius Discovery Provider", "author": "Hareesh Nagara", "url": "", "download_url": "", "author_email": "", "version": "0.1", "install_requires": requirements, "packages": find_packages(), "scripts": [], "name": "audius_discovery_provider", } setup(**config)
#!/usr/bin/env python from setuptools import setup try: from pip._internal.req import parse_requirements except ImportError: from pip.req import parse_requirements reqs = parse_requirements('requirements.txt', session='hack') reqs = [str(ir.req) for ir in reqs] setup( name='species', version='0.0.6', description= 'Toolkit for analyzing spectral and photometric data of planetary and substellar atmospheres', long_description=open('README.rst').read(), long_description_content_type='text/x-rst', author='Tomas Stolker', author_email='*****@*****.**', url='https://github.com/tomasstolker/species', packages=[ 'species', 'species.analysis', 'species.core', 'species.data', 'species.plot', 'species.read', 'species.util' ], package_dir={'species': 'species'}, include_package_data=True, install_requires=reqs, license='GPLv3', zip_safe=False,
def load_requirements(fname):
    """Parse *fname* (resolved relative to this file) into requirement
    strings using pip's parser."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    parsed = parse_requirements(full_path, session=PipSession())
    return [str(entry.requirement) for entry in parsed]
except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements import pkg_resources here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'VERSION')) as f: VERSION = f.read().strip() pr_kwargs = {} if pkg_resources.get_distribution("pip").version >= '6.0': pr_kwargs = {"session": False} install_reqs = parse_requirements( os.path.join( here, './requirements.txt' if not sys.argv[1] in ['develop', 'test'] else './requirements-dev.txt' ), **pr_kwargs) setup( name='discovery', version=VERSION, author='Clever (https://clever.com)', author_email='*****@*****.**', url='https://github.com/Clever/discovery-python/', packages=['discovery'], install_requires=[str(ir.req) for ir in install_reqs], setup_requires=['nose>=1.0'], test_suite='nose.collector', long_description="""\ Programmatically find service endpoints.
except ImportError: # pip <= 9.0.3 from pip.download import PipSession from pip.req import parse_requirements __version__ = '0.2.1' github_url = 'https://github.com/ciotto' package_name = 'telegram-bot-deploy' package_path = os.path.abspath(os.path.dirname(__file__)) long_description_file_path = os.path.join(package_path, 'README.md') long_description = '' try: install_requirements = [ str(ir.req) for ir in parse_requirements('requirements.txt', session=PipSession()) ] except AttributeError: install_requirements = [ str(ir.requirement) for ir in parse_requirements('requirements.txt', session=PipSession()) ] try: with open(long_description_file_path) as f: long_description = f.read() except IOError: pass setup( name=package_name, packages=find_packages(exclude=['docs', 'tests*']),
#!/usr/bin/env python
try:
    from setuptools import setup, find_packages
except ImportError:
    # NOTE(review): distutils.core has no find_packages — this fallback can
    # only work for setup(); confirm setuptools is always available.
    from distutils.core import setup, find_packages

try:
    # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:
    # for pip <= 9.0.3
    from pip.req import parse_requirements

# BUGFIX: parse_requirements() returns a generator on modern pip; the
# original iterated it twice (once for reqs, once for dep_links), so
# dep_links ended up empty. Materialize the parse result once.
install_reqs = list(parse_requirements('requirements.txt', session=False))
reqs = [str(ir.req) for ir in install_reqs]
dep_links = [str(req_line.url) for req_line in install_reqs]

setup(
    name='bb_circadian',
    version='0.2',
    description='BeesBook circadian behaviour analysis',
    author='David Dormagen',
    author_email='*****@*****.**',
    url='https://github.com/walachey/bb_circadian/',
    install_requires=reqs,
    dependency_links=dep_links,
    packages=find_packages(),
    package_dir={'bb_circadian': 'bb_circadian/'}
)
def requirements(path):
    """Return requirement strings from *path*, re-attaching environment
    markers with ';' where present."""
    specs = []
    for item in parse_requirements(path, session=uuid.uuid1()):
        text = str(item.req)
        if item.markers:
            # e.g. "foo==1.0;python_version < '3'"
            text = ";".join((text, str(item.markers)))
        specs.append(text)
    return specs
#!/usr/bin/env python import os from setuptools import setup from setuptools import find_packages try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements install_reqs = parse_requirements(os.path.join(os.path.dirname(__file__), "requirements", "base.txt"), session=False) reqs = [str(ir.req) for ir in install_reqs] def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read() setup( name = "pyGE", version = "0.0.3", description = "Small wrapper around some Geekevents API endpoints", author = "Kristoffer Dalby", author_email = "*****@*****.**", url = "https://github.com/kradalby/pyGE", keywords = ["geekevents"], classifiers = [ "Programming Language :: Python :: 3", "Environment :: Other Environment", "Intended Audience :: Developers",
def load_requirements(fname):
    """Return the requirement strings parsed from *fname*."""
    return [str(entry.req)
            for entry in parse_requirements(fname, session="test")]
from pip.download import PipSession try: from pip._internal.req import parse_requirements except ImportError: from pip.req import parse_requirements BASE_DIR = os.path.abspath(os.path.dirname(__file__)) if sys.version_info[0] == 2: from codecs import open # Read requirements _requirements_file = os.path.join(BASE_DIR, 'requirements.txt') _REQUIRES = [ str(r.req) for r in parse_requirements(_requirements_file, session=PipSession()) ] # Read description with open(os.path.join(BASE_DIR, 'README.rst'), encoding='utf-8') as f: _LONG_DESCRIPTION = f.read() _CLASSIFIERS = ( 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules',
#!/usr/bin/env python
from setuptools import setup, find_packages

try:
    # for pip >= 10
    from pip._internal.req import parse_requirements
    from pip._internal.download import PipSession
except ImportError:
    # for pip <= 9.0.3
    from pip.req import parse_requirements
    from pip.download import PipSession

# Read install dependencies from requirements.txt via pip's parser.
install_requires = parse_requirements('requirements.txt', session=PipSession())
dependencies = [str(package.req) for package in install_requires]

# BUGFIX: the original re-imported ``setup`` from distutils.core right here,
# shadowing the setuptools ``setup`` imported above. distutils' setup does
# not understand ``install_requires``, so the dependency list was silently
# ignored at install time. The redundant import is removed so the setuptools
# setup (which honours install_requires) is used.
setup(name='hydra-flock-central-controller',
      version='0.0.1',
      include_package_data=True,
      description='A simulation for HYDRA: Central Controller API',
      author='Hydra Ecosystem',
      author_email='*****@*****.**',
      url='https://github.com/HTTP-APIs/hydra-flock-central-controller',
      install_requires=dependencies)
# along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### try: # for pip >= 10 from pip._internal.req import parse_requirements from pip._internal.download import PipSession except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements from pip.download import PipSession from distutils.core import setup from setuptools import find_packages # Parse requirements.txt to get the list of dependencies inst_req = parse_requirements('requirements.txt', session=PipSession()) REQUIREMENTS = [str(r.req) for r in inst_req] setup(name='GeoNode', version=__import__('geonode').get_version(), description="Application for serving and sharing geospatial data", long_description=open('README.rst').read(), classifiers=[ "Development Status :: 4 - Beta"], keywords='', author='GeoNode Developers', author_email='*****@*****.**', url='http://geonode.org', license='GPL', packages=find_packages(), package_data={
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install

try:
    # pip >= 10 keeps the requirements parser under pip._internal
    from pip._internal.req import parse_requirements
except ImportError:
    # BUGFIX: was a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt — narrowed to ImportError, the only
    # failure this fallback is meant to handle (pip <= 9.0.3 layout).
    from pip.req import parse_requirements

if __name__ == '__main__':
    # Python 2 uses a separately pinned requirements file.
    requirements_path = 'requirements.txt'
    if sys.version_info[0] < 3:
        requirements_path = 'requirements_py2.txt'
    install_reqs = parse_requirements(requirements_path, session=False)
    reqs = [str(ir.req) for ir in install_reqs]
    setup(name='nlg-eval',
          version='2.3',
          description="Wrapper for multiple NLG evaluation methods and metrics.",
          author='Shikhar Sharma, Hannes Schulz, Justin Harris',
          author_email='[email protected], [email protected], [email protected]',
          url='https://github.com/Maluuba/nlg-eval',
          packages=find_packages(),
          include_package_data=True,
          scripts=['bin/nlg-eval'],
          install_requires=reqs,
          )
def setup_package():
    """Configuration of the setup

    Builds the extra setuptools arguments (packages, scripts, C extensions,
    dependencies) unless ``parse_setuppy_commands()`` says otherwise, then
    calls ``setup`` with the assembled metadata.
    """
    if parse_setuppy_commands():
        extra_setuptools_args = dict()
    else:
        from setuptools import find_packages, Extension
        from glob import glob
        try:
            # for pip >= 10
            from pip._internal.req import parse_requirements
        except ImportError:
            # for pip <= 9.0.3
            from pip.req import parse_requirements
        import pip
        # Dependencies for building C Extensions
        try:
            dependencies = list(parse_requirements('requirements.txt',
                                                   session=False))
        except TypeError:
            # new versions of pip requires a session
            dependencies = list(parse_requirements(
                'requirements.txt', session=pip.download.PipSession()))
        dependencies = [str(package.req) for package in dependencies]
        # Installing the required packages to build C extensions
        # NOTE(review): invoking pip.main() from setup.py installs packages
        # as a side effect of building — confirm this is intentional; it is
        # unsupported on pip >= 10.
        for package in dependencies:
            pip.main(['install', package])
        import numpy
        # C extension modules shipped with pyhrf.
        c_extensions = [Extension(ext_name,
                                  sources=['src/pyhrf/' + filepath])
                        for (ext_name, filepath) in
                        [('pyhrf.jde.intensivecalc',
                          'jde/intensivecalc.c'),
                         ('pyhrf.boldsynth.pottsfield.pottsfield_c',
                          'boldsynth/pottsfield/pottsField.c'),
                         ('pyhrf.vbjde.UtilsC',
                          'vbjde/utilsmodule.c'),
                         ('pyhrf.cparcellation',
                          'cparcellation.c')]]
        extra_setuptools_args = dict(
            package_dir={'': 'python'},
            packages=find_packages("python"),
            include_package_data=True,
            scripts=glob('./bin/*'),
            zip_safe=False,  # pyhrf has C/C++ extensions, so it's not zip safe.
            ext_modules=c_extensions,
            include_dirs=[numpy.get_include()],
            setup_requires=dependencies,
            install_requires=dependencies,
            extras_require={"cluster": ["soma-workflow"],
                            "simulation": ["Pillow>=2.3"],
                            "parcellation": ["munkres>=1.0"],
                            "pipelines": ["pygraphviz>=1.1"],
                            "graph": ["python-graph-core>=1.8"]})
    # Get the long description from the README file
    with open('README.rst') as readme_file:
        long_description = readme_file.read()
    metadata = dict(
        name='pyhrf',
        version='0.5.0',
        description='Analysis of fMRI data based on the study of hemodynamics',
        long_description=long_description,
        maintainer='Jaime Arias',
        maintainer_email='*****@*****.**',
        url='http://pyhrf.org',
        license='CeCILLv2',
        download_url='https://github.com/pyhrf/pyhrf',
        classifiers=[
            "Development Status :: 3 - Alpha",
            "Environment :: Console",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved",
            "Programming Language :: Python :: 2",
            "Programming Language :: Python :: 2.7",
            "Programming Language :: C",
            "Programming Language :: Python :: Implementation :: CPython",
            "Topic :: Scientific/Engineering",
            "Topic :: Scientific/Engineering :: Mathematics",
            "Topic :: Scientific/Engineering :: Medical Science Apps.",
        ],
        **extra_setuptools_args
    )
    setup(**metadata)
except ImportError: # pip < 10.0.0 from pip import req as pip_req VERSION_OPERATOR = re.compile('[><=!]') parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('filename', help='path of requirments file to parse') parser.add_argument('-p', '--python-version', default='{0.major}.{0.minor}'.format(sys.version_info), help='python version to use (default: %(default)s)') parser.add_argument('-o', '--output', help='path of output file, ' 'defaults to stdout') args = parser.parse_args() requirements = ["python={0.python_version}.*".format(args)] for item in pip_req.parse_requirements(args.filename, session='gwpyci'): # if environment markers don't pass, skip if item.markers and not item.markers.evaluate(): continue # if requirement is a URL, skip if item.original_link: continue requirements.append('{0.name}{0.req.specifier}'.format(item)) tmp = tempfile.mktemp() # clean up when we're done def _clean(): if os.path.isfile(tmp): os.remove(tmp) atexit.register(_clean)
datafiles.append(( os.path.join(dest, os.path.join(*(root.split('/')[src_comps - 1:]))), included_files )) return datafiles if not on_readthedocs and not os.path.exists("build/templates"): print('You need to run "console jinja compile" before you build.') sys.exit(1) install_requires = [] for install_require in chain(parse_requirements('requirements.txt', session=PipSession()), parse_requirements('mysql-requirements.txt', session=PipSession())): if install_require.req is not None: install_requires.append(str(install_require.req)) else: raise Exception("Couldn't parse requirement from requirements.txt") def build_opmuse(): config = ConfigParser() config.read('setup.cfg') if not os.path.exists('build'): os.mkdir('build') copy('config/opmuse.dist.ini', 'build/opmuse.ini')
* limitations under the License. * * @section DESCRIPTION """ from setuptools import setup try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements # load dependencies from external files requirements = [] for e in ['../utils/requirements.txt', '../utils/requirements-no-deps.txt']: install_reqs = parse_requirements(e, session='hack') requirements.extend([str(ir.req) for ir in install_reqs]) setup( name='CTS', version='2.4.0.173.0', packages=['main', 'cts_core', 'cts_core.commons', 'cts_core.measure', 'cts_core.metadata', 'cts_core.metadata.model', 'cts_core.metadata.model.metadata_types', 'cts_core.metadata.model.metadata_types.primitive_types', 'cts_core.metadata.primitive_types_helpers', 'cts_core.metadata.diff',
from setuptools import setup, find_packages import uuid try: from pip._internal.req import parse_requirements except ImportError: from pip.req import parse_requirements session = uuid.uuid1() version = '0.1' requirements = parse_requirements("requirements.txt", session=session) setup(name='langstroth', version=version, description="Status page for the NeCTAR Research Cloud.", long_description="""\ """, classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: ' + 'GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4'],
import os import uuid import glob from setuptools import setup, find_packages try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements if os.environ.get('USER','') == 'vagrant': del os.link os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1()) reqs = [str(ir.req) for ir in install_reqs] setup( name = "illumina-utils", version = open('VERSION').read().strip(), description = "A library and collection of scripts to work with Illumina paired-end data (for CASAVA 1.8+).", author = u"A. Murat Eren", author_email = "*****@*****.**", license = "GPLv3+", url = "https://github.com/meren/illumina-utils", packages = find_packages(), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console',
# -*- coding: utf-8 -*- import os import sys from setuptools import setup, find_packages from distutils.command.install_data import install_data try: # for pip >= 10 from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements cmdclass = {'install_data': install_data} data_files = [('/etc/MissingTVShows/', ['etc/tvshows.cfg', 'etc/logging.conf']), ('/usr/local/etc/bash_completion.d/', ['etc/missingTVShows-completion.bash'])] # parse_requirements() returns generator of pip.req.InstallRequirement objects install_reqs = parse_requirements('requirements.txt', session=False) reqs = [str(ir.req) for ir in install_reqs] tests_require = ['nose'] if sys.version_info[:2] == (2, 6): # Python unittest2 only needed for Python 2.6 tests_require.append('unittest2') # OrderedDict was added in 2.7 reqs.append('ordereddict') # Utility function to read the README file. # Used for the long_description. It's nice, because now 1) we have a top level
from pip._internal.req import parse_requirements except ImportError: # for pip <= 9.0.3 from pip.req import parse_requirements from setuptools import setup, find_packages __authors__ = ['"Hans Lellelid" <*****@*****.**>'] __copyright__ = "Copyright 2018 Hans Lellelid" version = '1.1.9' long_description=""" The freezing saddles cycling competition website/scoreboard. """ # parse_requirements() returns generator of pip.req.InstallRequirement objects install_reqs = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt'), session=uuid.uuid1()) # reqs is a list of requirement # e.g. ['django==1.5.1', 'mezzanine==1.4.6'] reqs = [str(ir.req) for ir in install_reqs] setup(name='freezing-web', version=version, description="Freezing Saddles website component.", long_description=long_description, author="Hans Lellelid", author_email="*****@*****.**", # This is a workaround for https://github.com/pypa/setuptools/issues/97 packages = ['freezing.web', 'freezing.web.views', 'freezing.web.utils'], # packages=find_packages(include=['freezing.web.*'])
def resolve_requires(requirements_file):
    """Parse a pip requirements file and return its requirement specifiers.

    Args:
        requirements_file: File name of a pip requirements file, resolved
            relative to the current working directory.

    Returns:
        list[str]: Requirement strings, e.g. ``['django==1.5.1']``.
    """
    requirements = parse_requirements("./%s" % requirements_file, session=False)
    # parse_requirements yields entries for comment/option lines too, whose
    # .req is None; filter them out instead of stringifying None (matches
    # the get_reqs() helper elsewhere in this file).
    return [str(ir.req) for ir in requirements if ir.req]
'your raw data with rich annotations, comments, tags and stars.') setup( name='timesketch', version=timesketch_version, description='Digital forensic timeline analysis', long_description=timesketch_description, license='Apache License, Version 2.0', url='http://www.timesketch.org/', maintainer='Timesketch development team', maintainer_email='*****@*****.**', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Operating System :: OS Independent', 'Programming Language :: Python', ], data_files=[ ('share/timesketch', ['timesketch.conf', 'config/features.yaml'])], packages=find_packages(), include_package_data=True, zip_safe=False, entry_points={'console_scripts': ['tsctl=timesketch.tsctl:main']}, install_requires=[str(req.req) for req in parse_requirements( 'requirements.txt', session=PipSession(), )], tests_require=[str(req.req) for req in parse_requirements( 'test_requirements.txt', session=PipSession(), )], )
def run(self, options, args):
    """Gather the requirements named on the command line and in any
    ``-r`` requirements files, then uninstall each resolved project,
    committing the removal set when one is produced.

    Raises:
        InstallationError: if no requirement was given at all.
    """
    with self._build_session(options) as session:
        # Canonical project name -> InstallRequirement.  Later mentions
        # of the same project overwrite earlier ones.
        to_remove = {}

        def _record(requirement):
            # Entries pip could not resolve to a project name are skipped.
            if requirement.name:
                to_remove[canonicalize_name(requirement.name)] = requirement

        for spec in args:
            _record(InstallRequirement.from_line(
                spec, isolated=options.isolated_mode,
            ))

        for req_file in options.requirements:
            for parsed in parse_requirements(
                    req_file, options=options, session=session):
                _record(parsed)

        if not to_remove:
            raise InstallationError(
                'You must give at least one requirement to %(name)s (see '
                '"pip help %(name)s")' % dict(name=self.name)
            )

        protect_pip_from_modification_on_windows(
            modifying_pip="pip" in to_remove
        )

        for requirement in to_remove.values():
            pathset = requirement.uninstall(
                auto_confirm=options.yes, verbose=self.verbosity > 0,
            )
            if pathset:
                pathset.commit()