def setup_extension_modules(self):
    """Sets up the C/C++/CUDA extension modules for this distribution.

    Create list of extensions for Python modules within the openmoc
    Python package based on the user-defined flags defined at compile time.
    """

    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()

    # Add the NumPy include directory to the include directories
    # list for each type of compiler
    for cc in self.include_directories.keys():
        self.include_directories[cc].append(numpy_include)

    # The main openmoc extension (defaults are gcc and single precision)
    self.extensions.append(
        Extension(name = '_rksolver',
                  sources = copy.deepcopy(self.sources[self.cc]),
                  library_dirs = self.library_directories[self.cc],
                  libraries = self.shared_libraries[self.cc],
                  extra_link_args = self.linker_flags[self.cc],
                  include_dirs = self.include_directories[self.cc],
                  swig_opts = self.swig_flags + ['-D' + self.cc.upper()]))
def get_numpy_include():
    # Obtain the numpy include directory. This logic works across numpy
    # versions.
    # setuptools forgets to unset numpy's setup flag and we get a crippled
    # version of it unless we do it ourselves.
    try:
        # Python 3 renamed the ``__builtin__`` module into ``builtins``.
        # Here we import the python 2 or the python 3 version of the module
        # with the python 3 name. This could be done with ``six`` but that
        # module may not be installed at that point.
        import __builtin__ as builtins
    except ImportError:
        import builtins
    builtins.__NUMPY_SETUP__ = False
    try:
        import numpy as np
    except ImportError:
        print('*** package "numpy" not found ***')
        print("MDAnalysis requires a version of NumPy (>=1.5.0), even for setup.")
        print("Please get it from http://numpy.scipy.org/ or install it through "
              "your package manager.")
        sys.exit(-1)
    try:
        numpy_include = np.get_include()
    except AttributeError:
        numpy_include = np.get_numpy_include()
    return numpy_include
def get_numpy_include():
    try:
        # Obtain the numpy include directory. This logic works across numpy
        # versions.
        # setuptools forgets to unset numpy's setup flag and we get a crippled
        # version of it unless we do it ourselves.
        try:
            import __builtin__  # py2
            __builtin__.__NUMPY_SETUP__ = False
        except:
            import builtins  # py3
            builtins.__NUMPY_SETUP__ = False
        import numpy as np
    except ImportError as e:
        print(e)
        print('*** package "numpy" not found ***')
        print('Simpletraj requires a version of NumPy, even for setup.')
        print('Please get it from http://numpy.scipy.org/ or install it through '
              'your package manager.')
        sys.exit(-1)
    try:
        numpy_include = np.get_include()
    except AttributeError:
        numpy_include = np.get_numpy_include()
    return numpy_include
def get_numpy_include_path():
    """
    Gets the path to the numpy headers.
    """
    # We need to go through this nonsense in case setuptools
    # downloaded and installed Numpy for us as part of the build or
    # install, since Numpy may still think it's in "setup mode", when
    # in fact we're ready to use it to build astropy now.

    if sys.version_info[0] >= 3:
        import builtins
        if hasattr(builtins, '__NUMPY_SETUP__'):
            del builtins.__NUMPY_SETUP__
        import imp
        import numpy
        imp.reload(numpy)
    else:
        import __builtin__
        if hasattr(__builtin__, '__NUMPY_SETUP__'):
            del __builtin__.__NUMPY_SETUP__
        import numpy
        reload(numpy)

    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()
    return numpy_include
def main():
    install_requires = ['pyparsing>=2.0.0']
    #if sys.version_info[0] >= 3:
    #    install_requires = ['pyparsing>=2.0.0']
    #else:
    #    # pyparsing >= 2.0.0 is not compatible with Python 2
    #    install_requires = ['pyparsing<2.0.0']

    ka = dict(name = "pyregion",
              version = __version__,
              description = "python parser for ds9 region files",
              author = "Jae-Joon Lee",
              author_email = "*****@*****.**",
              url="http://leejjoon.github.com/pyregion/",
              download_url="http://github.com/leejjoon/pyregion/downloads",
              license = "MIT",
              platforms = ["Linux", "MacOS X"],
              packages = ['pyregion'],
              package_dir={'pyregion': 'lib'},
              install_requires = install_requires,
              use_2to3 = False,
              )

    ka["classifiers"] = ['Development Status :: 5 - Production/Stable',
                         'Intended Audience :: Science/Research',
                         'License :: OSI Approved :: MIT License',
                         'Operating System :: MacOS :: MacOS X',
                         'Operating System :: POSIX :: Linux',
                         'Programming Language :: Cython',
                         'Programming Language :: Python',
                         'Programming Language :: Python :: 3',
                         'Topic :: Scientific/Engineering :: Astronomy',
                         ]

    if WITH_FILTER:
        try:
            import numpy
        except ImportError:
            warnings.warn("numpy must be installed to build the filtering module.")
            sys.exit(1)

        try:
            numpy_include = numpy.get_include()
        except AttributeError:
            numpy_include = numpy.get_numpy_include()

        if cmdclass:
            ka["cmdclass"] = cmdclass

        ka["ext_modules"] = [Extension("pyregion._region_filter",
                                       [PYREX_SOURCE],
                                       include_dirs=['./src',
                                                     numpy_include,
                                                     ],
                                       libraries=[],
                                       )
                             ]

    setup(**ka)
def add_numpy_flags(module):
    "Add the module's flags to build extensions which use numpy"
    import numpy

    # TODO: Remove this try statement when it is no longer needed
    try:
        module.include_dirs.append(numpy.get_include())
    except AttributeError:
        module.include_dirs.append(numpy.get_numpy_include())
def get_numpy_include():
    """
    Obtain the numpy include directory. This logic works across numpy versions.
    """
    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()
    return numpy_include
def test_include_dirs(self):
    """As a sanity check, just test that get_include and
    get_numarray_include include something reasonable. Somewhat
    related to ticket #1405."""
    include_dirs = [np.get_include(), np.get_numarray_include(),
                    np.get_numpy_include()]
    for path in include_dirs:
        assert isinstance(path, (str, unicode))
        assert path != ''
def get_numpy_include_path():
    import six

    if hasattr(six.moves.builtins, '__NUMPY_SETUP__'):
        del six.moves.builtins.__NUMPY_SETUP__

    import numpy
    six.moves.reload_module(numpy)

    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()
    return numpy_include
def get_numpy_include_path():
    """
    Gets the path to the numpy headers.
    """
    import numpy

    try:
        numpy_include = numpy.get_include()
    except AttributeError:
        numpy_include = numpy.get_numpy_include()
    return numpy_include
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    import numpy

    config = Configuration('core', parent_package, top_path)
    config.set_options(quiet=True)

    config.add_subpackage('image')
    config.add_subpackage('orient')
    config.add_subpackage('parallel')
    config.add_subpackage('learn')

    try:
        numpy_include = numpy.get_include()
    except:
        numpy_include = numpy.get_numpy_include()  #@UndefinedVariable
    config.add_include_dirs(numpy_include)

    return config
def get_ext_modules(use_cython):
    from numpy import get_include as get_numpy_include
    cython_modules, cython_sources = get_cython_sources(use_cython)

    ext_modules = [
        Extension('pywt._extensions.{0}'.format(module),
                  sources=[make_ext_path(source)],
                  # Doesn't automatically rebuild if library changes
                  depends=c_lib[1]['sources'] + c_lib[1]['depends'],
                  include_dirs=[make_ext_path("c"), get_numpy_include()],
                  define_macros=c_macros + cython_macros,
                  libraries=[c_lib[0]],)
        for module, source, in zip(cython_modules, cython_sources)
    ]
    return ext_modules
def __compileDistUtils__(hash, includeDirs, lib_dirs, libraries, doOptimizeGcc=True, additonalSources=[]):
    '''
    Compiles an inline C module with distutils. First a SWIG interface is used
    to generate SWIG wrapper code, which is then compiled into a Python module.

    @brief Compiles an inline C module with distutils.
    @param hash Name of the c, interface and module files
    @param includeDirs List of directories in which distutils looks for headers needed
    @param lib_dirs List of directories in which distutils looks for libs needed
    @param libraries List of libraries which distutils uses for binding
    @return None
    '''
    try:
        numpy_include = np.get_include()
    except AttributeError:
        numpy_include = np.get_numpy_include()

    includeDirs.extend([numpy_include, os.curdir])

    iFileName = hash + ".i"
    cFileName = hash + "." + C_FILE_SUFFIX
    cWrapFileName = hash + "_wrap." + __WRAPPER_FILE_SUFFIX__

    subprocess.check_call(["swig", "-python", "-c++", iFileName])

    extra_compile_args = []
    if doOptimizeGcc:
        extra_compile_args = ["-pthread", "-O3", "-march=native", "-mtune=native"]

    sourcesList = [cFileName, cWrapFileName]
    sourcesList.extend(additonalSources)

    module1 = Extension('_%s' % hash,
                        sources=sourcesList,
                        library_dirs=lib_dirs,
                        libraries=libraries,
                        include_dirs=includeDirs,
                        extra_compile_args=extra_compile_args)

    setup(script_args=['build'],
          name='_%s' % hash,
          version='1.0',
          description='SWICE JIT lib',
          ext_modules=[module1],
          include_dirs=includeDirs)
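# --- Hedged usage sketch (not part of the original file) ---
# The helper above assumes module-level `np`, `C_FILE_SUFFIX` and
# `__WRAPPER_FILE_SUFFIX__` are defined elsewhere; the module name, paths and
# flags below are purely illustrative. Given foo.c and foo.i next to the
# script, this would build and then import the SWIG module `_foo`.
__compileDistUtils__('foo',
                     includeDirs=['include'],      # hypothetical header dir
                     lib_dirs=['/usr/local/lib'],  # hypothetical library dir
                     libraries=['m'],
                     doOptimizeGcc=True)

import _foo  # the freshly built module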
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    from numpy import get_include as get_numpy_include

    config = Configuration('_extensions', parent_package, top_path)

    sources = ["c/common.c", "c/convolution.c", "c/wt.c", "c/wavelets.c"]
    source_templates = ["c/convolution.template.c", "c/wt.template.c"]
    headers = ["c/templating.h", "c/wavelets_coeffs.h",
               "c/common.h", "c/convolution.h", "c/wt.h", "c/wavelets.h"]
    header_templates = ["c/convolution.template.h", "c/wt.template.h",
                        "c/wavelets_coeffs.template.h"]

    config.add_extension(
        '_pywt', sources=["_pywt.c"] + sources,
        depends=source_templates + header_templates + headers,
        include_dirs=["c", get_numpy_include()],
        define_macros=[("PY_EXTENSION", None)],
    )

    config.add_extension(
        '_dwt', sources=["_dwt.c"] + sources,
        depends=source_templates + header_templates + headers,
        include_dirs=["c", get_numpy_include()],
        define_macros=[("PY_EXTENSION", None)],
    )

    config.add_extension(
        '_swt', sources=["_swt.c"] + sources,
        depends=source_templates + header_templates + headers,
        include_dirs=["c", get_numpy_include()],
        define_macros=[("PY_EXTENSION", None)],
    )

    config.make_config_py()

    return config
    cythonize_opts['linetrace'] = True
    cython_macros.append(("CYTHON_TRACE_NOGIL", 1))

# By default C object files are rebuilt for every extension
# C files must be built once only for coverage to work
c_lib = ('c_edf', {'sources': sources,
                   'depends': headers,
                   'include_dirs': [make_ext_path("c"), get_python_inc()],
                   'macros': c_macros,})

ext_modules = [
    Extension('pyedflib._extensions.{0}'.format(module),
              sources=[make_ext_path(source)],
              # Doesn't automatically rebuild if library changes
              depends=c_lib[1]['sources'] + c_lib[1]['depends'],
              include_dirs=[make_ext_path("c"), get_numpy_include()],
              define_macros=c_macros + cython_macros,
              libraries=[c_lib[0]],)
    for module, source, in zip(cython_modules, cython_sources)
]

from setuptools.command.develop import develop


class develop_build_clib(develop):
    """Ugly monkeypatching to get clib to build for development installs

    See coverage comment above for why we don't just let libraries be built
    via extensions.

    All this is a copy of the relevant part of `install_for_development`
    for current master (Sep 2016) of setuptools.

    Note: if you want to build in-place with ``python setup.py build_ext``,
    that will only work if you first do ``python setup.py build_clib``.
    """
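# --- Hedged usage sketch (not part of the original file) ---
# The class above only takes effect once it is registered as the `develop`
# command; a minimal wiring, assuming the usual setup() call later in the
# same file, would look roughly like this (the package name is inferred from
# the extension prefix above).
from setuptools import setup

setup(name='pyedflib',
      libraries=[c_lib],
      ext_modules=ext_modules,
      cmdclass={'develop': develop_build_clib})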
def add_numpy_include(module):
    "Add the include path needed to build extensions which use numpy."
    module.include_dirs.append(numpy.get_numpy_include())
                      '-fno-omit-frame-pointer', '-funroll-loops',
                      '-fstrict-aliasing', '-std=c99',
                      '-vec-report=0', '-par-report=0',
                      '-O3', '-xHost', '-mtune=native',
                      '-Wall', '-openmp']

try:
    numpy_inc = [numpy.get_include()]
except AttributeError:
    numpy_inc = [numpy.get_numpy_include()]

crwalk = Extension('glass.solvers.rwalk.csamplex',
                   sources = ['glass/solvers/rwalk/csamplex_omp.c',
                              'glass/solvers/rwalk/WELL44497a.c'],
                   include_dirs=numpy_inc,
                   undef_macros=['DEBUG'],
                   libraries=libraries,
                   extra_compile_args=extra_compile_args,
                   extra_link_args=extra_link_args)

setup(name = 'Glass',
      author = 'Jonathan Coles',
      author_email = '*****@*****.**',
      version = '1.0',
      description = 'Gravitational Lensing AnalysiS Software',
      package_dir = {'glass': 'glass'},
import numpy

try:
    print(numpy.get_include())
except AttributeError:
    print(numpy.get_numpy_include())
# the generated C sources to SVN).
try:
    from Pyrex.Distutils import build_ext
    has_pyrex = True
except ImportError:
    has_pyrex = False

import numpy

# Define a pyrex-based extension module, using the generated sources if pyrex
# is not available.
if has_pyrex:
    pyx_sources = ['numpyx.pyx']
    cmdclass = {'build_ext': build_ext}
else:
    pyx_sources = ['numpyx.c']
    cmdclass = {}

pyx_ext = Extension('numpyx',
                    pyx_sources,
                    include_dirs = [numpy.get_numpy_include()])

# Call the routine which does the real work
setup(name = 'numpyx',
      description = 'Small example on using Pyrex to write a Numpy extension',
      url = 'http://www.scipy.org/Cookbook/Pyrex_and_NumPy',
      ext_modules = [pyx_ext],
      cmdclass = cmdclass,
      )
    def __init__(self, *args, **kwargs):
        sdist.__init__(self, *args, **kwargs)
        self.unstable = False

    def run(self):

        if not self.unstable:
            version_file = 'hyperion/version.py'
            content = open(version_file, 'r').read()
            open(version_file, 'w').write(content.replace('__dev__ = True',
                                                          "__dev__ = False"))

        try:
            sdist.run(self)
        finally:
            if not self.unstable:
                open(version_file, 'w').write(content)

numpy_includes = get_numpy_include()

cmdclass = {}
cmdclass['build_py'] = build_py
cmdclass['test'] = HyperionTest
cmdclass['sdist'] = custom_sdist

ext_modules = [Extension("hyperion.util._integrate_core",
                         ['hyperion/util/_integrate_core.c'],
                         include_dirs=[numpy_includes],
                         extra_compile_args=['-Wno-error=declaration-after-statement']),
               Extension("hyperion.util._interpolate_core",
                         ['hyperion/util/_interpolate_core.c'],
                         include_dirs=[numpy_includes],
                         extra_compile_args=['-Wno-error=declaration-after-statement']),
               Extension("hyperion.importers._discretize_sph",
def numpy_include():
    try:
        inc = np.get_include()
    except AttributeError:
        inc = np.get_numpy_include()
    return inc
def setup_extension_modules(self):
    """Sets up the C/C++/CUDA extension modules for this distribution.

    Create list of extensions for Python modules within the openmoc
    Python package based on the user-defined flags defined at compile time.
    """

    # If the user selected 'all' compilers, enumerate them
    if self.cpp_compilers == ['all']:
        self.cpp_compilers = ['gcc', 'icpc', 'nvcc']

    # If the user selected 'all' FP precision levels, enumerate them
    if self.fp_precision == ['all']:
        self.fp_precision = ['double', 'single']

    # If the user wishes to compile using debug mode, append the debugging
    # flag to all lists of compiler flags for all distribution types
    if self.debug_mode:
        for k in self.compiler_flags:
            self.compiler_flags[k].append('-g')

    # If the user passed in the --no-numpy flag, tell SWIG not to embed
    # NumPy typemaps in the source code
    if not self.with_numpy:
        self.swig_flags.append('-DNO_NUMPY')

    # Otherwise, obtain the NumPy include directory
    else:
        try:
            numpy_include = numpy.get_include()
        except AttributeError:
            numpy_include = numpy.get_numpy_include()

        # Add the NumPy include directory to the include directories
        # list for each type of compiler
        for cc in self.include_directories.keys():
            self.include_directories[cc].append(numpy_include)

    # The main openmoc extension (defaults are gcc and single precision)
    self.extensions.append(
        Extension(name = '_openmoc',
                  sources = copy.deepcopy(self.sources[self.cc]),
                  library_dirs = self.library_directories[self.cc],
                  libraries = self.shared_libraries[self.cc],
                  extra_link_args = self.linker_flags[self.cc],
                  include_dirs = self.include_directories[self.cc],
                  define_macros = self.macros[self.cc][self.fp],
                  swig_opts = self.swig_flags + ['-D' + self.cc.upper()]))

    # The openmoc.cuda extension if requested by the user at compile
    # time (--with-cuda)
    if self.with_cuda:
        self.cpp_compilers.append('nvcc')
        self.extensions.append(
            Extension(name = '_openmoc_cuda',
                      sources = copy.deepcopy(self.sources['nvcc']),
                      library_dirs = self.library_directories['nvcc'],
                      libraries = self.shared_libraries['nvcc'],
                      extra_link_args = self.linker_flags['nvcc'],
                      include_dirs = self.include_directories['nvcc'],
                      define_macros = self.macros['nvcc'][self.fp],
                      swig_opts = self.swig_flags + ['-DNVCC'],
                      export_symbols = ['init_openmoc']))

        # Remove the main SWIG configuration file for builds of other
        # extensions (ie, openmoc.cuda.single, openmoc.cuda.double)
        self.sources['nvcc'].remove('openmoc/cuda/openmoc_cuda_wrap.cpp')

    # Loop over the compilers and floating point precision levels to create
    # extension modules for each (ie, openmoc.intel.double,
    # openmoc.cuda.single, etc)
    for fp in self.fp_precision:
        for cc in self.cpp_compilers:

            # Build the filename for the SWIG configuration file and the
            # extension name depending on the compiler and floating
            # point precision

            # For openmoc.cuda.* modules
            if cc == 'nvcc':
                ext_name = '_openmoc_cuda_' + fp
                swig_wrap_file = 'openmoc/cuda/' + fp
                swig_wrap_file += '/openmoc_cuda_' + fp + '_wrap.cpp'
                self.sources['nvcc'].append(swig_wrap_file)

            # For openmoc.gnu.* modules
            elif cc == 'gcc':
                ext_name = '_openmoc_gnu_' + fp
                swig_wrap_file = 'openmoc/gnu/' + fp
                swig_wrap_file += '/openmoc_gnu_' + fp + '_wrap.cpp'
                self.sources['gcc'].append(swig_wrap_file)

            # For openmoc.intel.* modules
            elif cc == 'icpc':
                ext_name = '_openmoc_intel_' + fp
                swig_wrap_file = 'openmoc/intel/' + fp
                swig_wrap_file += '/openmoc_intel_' + fp + '_wrap.cpp'
                self.sources['icpc'].append(swig_wrap_file)

            # For openmoc.bgq.* modules
            elif cc == 'bgxlc':
                ext_name = '_openmoc_bgq_' + fp
                swig_wrap_file = 'openmoc/bgq/' + fp
                swig_wrap_file += '/openmoc_bgq_' + fp + '_wrap.cpp'
                self.sources['bgxlc'].append(swig_wrap_file)

            # If an unsupported compiler, throw error
            else:
                raise NameError('Compiler ' + str(cc) + ' is not supported')

            # Create the extension module and append it to the list of all
            # extension modules
            self.extensions.append(
                Extension(name = ext_name,
                          sources = copy.deepcopy(self.sources[cc]),
                          library_dirs = self.library_directories[cc],
                          libraries = self.shared_libraries[cc],
                          extra_link_args = self.linker_flags[cc],
                          include_dirs = self.include_directories[cc],
                          define_macros = self.macros[cc][fp],
                          swig_opts = self.swig_flags + ['-D' + cc.upper()]))

            # Clean up - remove the SWIG-generated wrap file from this
            # extension for the next extension
            self.sources[cc].remove(swig_wrap_file)
# setup script to compile jitter module;
# written by M. Vallisneri (2015)
#
# use python setup.py build_ext --inplace to test

import distutils.core as D
import numpy as N

from distutils.core import setup, Extension
from distutils import sysconfig

try:
    numpy_include = N.get_include()
except AttributeError:
    numpy_include = N.get_numpy_include()

# need to replace build_ext to build cython extension
import Cython.Distutils

extension = D.Extension('NX01_jitter',
                        sources = ['NX01_jitter.pyx'],
                        include_dirs = [numpy_include],
                        extra_compile_args = ['-std=c99']
                        )

D.setup(name = 'NX01_jitter',
        ext_modules = [extension],
        cmdclass = {"build_ext": Cython.Distutils.build_ext}
        )
def make_cmf_core():
    """
    Puts all information needed for the Python extension object together
    - source files
    - include dirs
    - extra compiler flags
    """
    libraries = None

    # Include CVODE
    include_dirs = [
        os.path.join(
            *'cmf/cmf_core_src/math/integrators/sundials_cvode/include'.split('/'))
    ]
    # Include numpy
    include_dirs += [get_numpy_include()]

    # Platform specific stuff, alternative is to subclass build_ext command as in:
    # https://stackoverflow.com/a/5192738/3032680
    if sys.platform == 'win32':

        # Only include boost if VS2008 compiler is used, else we use C++ 11
        if sys.version_info.major == 2:
            boost_path = os.environ.get('BOOSTDIR', r"..\boost_1_41_0")
            include_dirs += [boost_path, boost_path + r"\boost\tr1"]

        compile_args = [
            "/EHsc", r'/Fd"build\vc90.pdb"',
            "/D_SCL_SECURE_NO_WARNINGS", "/D_CRT_SECURE_NO_WARNINGS", "/MP"
        ]
        if openmp:
            compile_args.append("/openmp")
        link_args = ["/DEBUG"]

    else:
        compile_args = [
            '-Wno-comment', '-Wno-reorder', '-Wno-deprecated',
            '-Wno-unused', '-Wno-sign-compare', '-ggdb', '-std=c++11'
        ]
        if sys.platform == 'darwin':
            compile_args += ["-stdlib=libc++"]
        link_args = ['-ggdb']
        libraries = []

        # Disable OpenMP on Mac see https://github.com/alejandrobll/py-sphviewer/issues/3
        if openmp and not sys.platform == 'darwin':
            compile_args.append('-fopenmp')
            link_args.append("-fopenmp")
            libraries.append('gomp')

    # Get the source files
    cmf_files = []
    for root, _dirs, files in os.walk(os.path.join('cmf', 'cmf_core_src')):
        if os.path.basename(root) != 'debug_scripts':
            cmf_files.extend(
                os.path.join(root, f) for f in files
                if is_source_file(f) and f != 'cmf_wrap.cpp')

    if swig:
        # Adding cmf.i when build_ext should perform the swig call
        cmf_files.append("cmf/cmf_core_src/cmf.i")
        swig_opts = [
            '-c++', '-Wextra', '-w512', '-w511', '-O',
            '-keyword', '-castmode', '-modern'
        ]
    else:
        # Else use what we have there
        cmf_files.append("cmf/cmf_core_src/cmf_wrap.cpp")
        swig_opts = []

    cmf_core = Extension('cmf._cmf_core',
                         sources=cmf_files,
                         libraries=libraries,
                         include_dirs=include_dirs,
                         extra_compile_args=compile_args,
                         extra_link_args=link_args,
                         swig_opts=swig_opts)

    return cmf_core
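# --- Hedged usage sketch (not part of the original file) ---
# make_cmf_core() returns a single Extension object; assuming the usual
# setup() call elsewhere in this script, it would be registered roughly like
# this (the package metadata here is illustrative only).
from setuptools import setup

setup(name='cmf',
      packages=['cmf'],
      ext_modules=[make_cmf_core()])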
    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    for k, v in cudaconfig.iteritems():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig

CUDA = locate_cuda()

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()

def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""

    # tell the compiler it can process .cu files
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _compile methods
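    # --- Hedged sketch (not part of the original file) ---
    # The snippet above breaks off right after "save references"; the recipe
    # its docstring describes (the widely circulated CUDA + distutils
    # monkeypatch) typically continues along these lines, assuming
    # extra_postargs is passed as a dict keyed by 'gcc' and 'nvcc'.
    default_compiler_so = self.compiler_so
    super_compile = self._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # dispatch .cu files to nvcc with its own flags
            self.set_executable('compiler_so', CUDA['nvcc'])
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        super_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # restore the default compiler for the next source file
        self.compiler_so = default_compiler_so

    # swap in the dispatching _compile
    self._compile = _compile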
import versioneer
import setuptools.command.install
import setuptools.command.build_ext
from setuptools import setup
from setuptools import Extension

try:
    from numpy import get_include as get_numpy_include
except ImportError:
    from numpy import get_numpy_include as get_numpy_include

numpy_include = get_numpy_include()


class BuildExtFirst(setuptools.command.install.install):
    def run(self):
        self.run_command("build_ext")
        return setuptools.command.install.install.run(self)


class BuildExtOnce(setuptools.command.build_ext.build_ext):
    def __init__(self, *args, **kwargs):
        # Avoiding namespace collisions...
        self.setup_build_ext_already_ran = False
        setuptools.command.build_ext.build_ext.__init__(self, *args, **kwargs)

    def run(self):
        # Only let build_ext run once
        if not self.setup_build_ext_already_ran:
            self.setup_build_ext_already_ran = True
            return setuptools.command.build_ext.build_ext.run(self)
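# --- Hedged usage sketch (not part of the original file) ---
# The two command classes above would typically be registered via cmdclass,
# merged with versioneer's commands; the package name and extension below are
# hypothetical placeholders.
cmdclass = versioneer.get_cmdclass()
cmdclass.update({'install': BuildExtFirst,
                 'build_ext': BuildExtOnce})

setup(name='example_package',  # hypothetical name
      version=versioneer.get_version(),
      cmdclass=cmdclass,
      ext_modules=[Extension('example_package._ext',  # hypothetical extension
                             sources=['src/ext.c'],
                             include_dirs=[numpy_include])])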
import os
import sys
from os.path import join

from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

# Numpy from http://www.scipy.org/Cookbook/SWIG_NumPy_examples
import numpy as np
try:
    NUMPY_INCLUDE = np.get_include()
except AttributeError:
    NUMPY_INCLUDE = np.get_numpy_include()

# `EIGEN_INCLUDE` and `COMMON_CPP_INCLUDE` from site.cfg.
import ConfigParser
c = ConfigParser.ConfigParser()
# Preserve case. See:
# http://stackoverflow.com/questions/1611799/preserve-case-in-configparser
c.optionxform = str
c.read('site.cfg')
EIGEN_INCLUDE = c.get('Include', 'EIGEN_INCLUDE')
COMMON_CPP_INCLUDE = c.get('Include', 'COMMON_CPP_INCLUDE')

# Setup.
qpbo_dir = 'external/QPBO-v1.32.src/'
include_dirs = [NUMPY_INCLUDE,
                EIGEN_INCLUDE,
                COMMON_CPP_INCLUDE,
                'include/',
                qpbo_dir,
print "pyMDMix requires Python 2.5 or later. Python %d.%d detected" % \ sys.version_info[:2] print "Please upgrade your version of Python." sys.exit(-1) # Make sure AMBERHOME environ variable is set if not os.environ.get('AMBERHOME'): print "AMBERHOME env variable not set! Please set this variable pointing to AMBER package installation directory." #scriptlist = ['scripts/prepareMDMixProject.py','scripts/runCenteringAndRawEnergyCalculations.py','scripts/createMinotauroQueueInput.py','scripts/createPaintersQueueInput.py', 'scripts/runReplicaCentering.py','scripts/extendReplica.py', 'scripts/printReplicaInfo.py','scripts/createReplicaCenteringInput.py','scripts/runReplicaDensityAndRawCalculation.py', 'scripts/mdmix'] scriptlist = ['src/mdmix'] try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() include_dirs = [numpy_include] def getVersionFromInit(): lines = open('pyMDMix/__init__.py', 'r').readlines() for l in lines: if '__version__' in l: return l.split('=')[1].split('"')[1] setup( name='pyMDMix', cmdclass={'install': CustomInstall}, zip_safe=False, version=getVersionFromInit(),
def get_numpy_include():
    try:
        numpy_include = np.get_include()
    except AttributeError:
        numpy_include = np.get_numpy_include()
    return numpy_include
from Cython.Build import cythonize
import numpy as np
from distutils.core import setup

try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()

print(numpy_include)
numpy_include = r'C:/Users/Administrator/AppData/Roaming/Python/Python35/site-packages/numpy/core/include/'

#ext_modules=cythonize(["bbox.pyx","cython_nms.pyx"],include_dirs=[numpy_include]),
# include_path=[numpy_include]
setup(ext_modules=cythonize(["bbox.pyx", "cython_nms.pyx"],
                            include_path=[numpy_include]),
      )
def main():
    try:
        import numpy
        try:
            numpy_include = numpy.get_include()
        except AttributeError:
            numpy_include = numpy.get_numpy_include()
    except ImportError:
        numpy_include = ''
        assert 'NUMPY_INCLUDE' in os.environ

    def read(fname):
        return open(os.path.join(os.path.dirname(__file__), fname)).read()

    numpy_include = os.getenv('NUMPY_INCLUDE', numpy_include)
    numpy_min_ver = os.getenv('NUMPY_DEP_VERSION', '')

    project_name = 'deepspeech'
    if '--project_name' in sys.argv:
        project_name_idx = sys.argv.index('--project_name')
        project_name = sys.argv[project_name_idx + 1]
        sys.argv.remove('--project_name')
        sys.argv.pop(project_name_idx)

    with open('../../VERSION', 'r') as ver:
        project_version = ver.read().strip()

    class BuildExtFirst(build):
        sub_commands = [('build_ext', build.has_ext_modules),
                        ('build_py', build.has_pure_modules),
                        ('build_clib', build.has_c_libraries),
                        ('build_scripts', build.has_scripts)]

    # Properly pass arguments for linking, setuptools will perform some checks
    def lib_dirs_split(a):
        if os.name == 'posix':
            return a.split('-L')[1:]

        if os.name == 'nt':
            return []

        raise AssertionError('os.name == java not expected')

    def libs_split(a):
        if os.name == 'posix':
            return a.split('-l')[1:]

        if os.name == 'nt':
            return a.split('.lib')[0:1]

        raise AssertionError('os.name == java not expected')

    ds_ext = Extension(name='deepspeech._impl',
                       sources=['impl.i'],
                       include_dirs=[numpy_include, '../'],
                       library_dirs=list(map(lambda x: x.strip(),
                                             lib_dirs_split(os.getenv('MODEL_LDFLAGS', '')))),
                       libraries=list(map(lambda x: x.strip(),
                                          libs_split(os.getenv('MODEL_LIBS', '')))),
                       swig_opts=['-c++', '-keyword', '-builtin'])

    setup(name=project_name,
          description='A library for running inference on a DeepSpeech model',
          long_description=read('../../README.md'),
          long_description_content_type='text/markdown; charset=UTF-8',
          author='Mozilla',
          version=project_version,
          package_dir={'deepspeech': '.'},
          cmdclass={'build': BuildExtFirst},
          license='MPL-2.0',
          url='https://github.com/mozilla/DeepSpeech',
          project_urls={
              'Documentation': 'https://github.com/mozilla/DeepSpeech/tree/v{}#project-deepspeech'.format(project_version),
              'Tracker': 'https://github.com/mozilla/DeepSpeech/issues',
              'Repository': 'https://github.com/mozilla/DeepSpeech/tree/v{}'.format(project_version),
              'Discussions': 'https://discourse.mozilla.org/c/deep-speech',
          },
          ext_modules=[ds_ext],
          py_modules=['deepspeech', 'deepspeech.client', 'deepspeech.impl'],
          entry_points={'console_scripts': ['deepspeech=deepspeech.client:main']},
          install_requires=['numpy%s' % numpy_min_ver],
          include_package_data=True,
          classifiers=[
              'Development Status :: 3 - Alpha',
              'Environment :: Console',
              'Intended Audience :: Developers',
              'Intended Audience :: Science/Research',
              'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3.4',
              'Programming Language :: Python :: 3.5',
              'Programming Language :: Python :: 3.6',
              'Topic :: Multimedia :: Sound/Audio :: Speech',
              'Topic :: Scientific/Engineering :: Human Machine Interfaces',
              'Topic :: Scientific/Engineering',
              'Topic :: Utilities',
          ])
def setup_package():
    # Rewrite the version file every time
    write_version_py()

    info['version'] = get_version_info()[0]
    print(info['version'])

    if USE_CYTHON:
        # Obtain the numpy include directory. This logic works across numpy versions.
        ext_modules = []
        HAS_NUMPY = True

        try:
            import numpy as np
        except:
            info['setup_requires'] = ['numpy']
            HAS_NUMPY = False

        if HAS_NUMPY:
            try:
                numpy_include = np.get_include()
            except AttributeError:
                numpy_include = np.get_numpy_include()

            # creflect extension module
            _creflect = Extension(
                name='refnx.analysis._creflect',
                sources=['src/_creflect.pyx', 'src/refcalc.cpp'],
                include_dirs=[numpy_include],
                language='c',
                extra_link_args=['-lpthread']
                # libraries=
                # extra_compile_args = "...".split(),
            )
            ext_modules.append(_creflect)

            _cevent = Extension(
                name='refnx.reduce._cevent',
                sources=['src/_cevent.pyx'],
                include_dirs=[numpy_include],
                language='c',
                # libraries=
                # extra_compile_args = "...".split(),
            )
            ext_modules.append(_cevent)

            info['cmdclass'] = {'build_ext': build_ext}
            info['ext_modules'] = ext_modules

    try:
        setup(**info)
    except ValueError:
        # there probably wasn't a C-compiler (windows). Try removing extension
        # compilation
        print("")
        print("*****WARNING*****")
        print("You didn't try to build the Reflectivity calculation extension."
              " Calculation will be slow, falling back to pure python."
              " To compile extension install cython. If installing in windows you"
              " should then install from Visual Studio command prompt (this makes"
              " C compiler available")
        print("*****************")
        print("")
        info.pop('cmdclass')
        info.pop('ext_modules')
        setup(**info)
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions;
see http://www.gnu.org/licenses/gpl-3.0.html for details.
"""

# System imports
from distutils.core import *
from distutils import sysconfig

# Third-party modules - we depend on numpy for everything
import numpy

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()

# simple extension module
_sasview_vmd = Extension("_sasview_vmd",
                         ["sasview_vmd.i", "sasview_vmd.c", "imd.c", "vmdsock.c"],
                         include_dirs=[numpy_include]
                         )

# NumpyTypemapTests setup
setup(name="SASVIEW VMD I/O",
      description="Module handles sending and receiving coordinates to VMD using numpy.i",
      author="Joseph E. Curtis",
      version="0.1",
      ext_modules=[_sasview_vmd],
      )