Example #1
File: setup.py Project: mbr/githome
    def run(self):
        cc = new_compiler()
        customize_compiler(cc)
        o_files = cc.compile(['githome/gh_client.c'])
        cc.link_executable(o_files, 'githome/gh_client')

        install.run(self)  # run normal build command
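
A minimal self-contained sketch (an assumption, not the githome project's actual layout) of how such a custom install command is registered with setup(); the class name InstallWithClient is a placeholder for whatever class defines the run() shown above:

from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from setuptools import setup
from setuptools.command.install import install

class InstallWithClient(install):  # hypothetical name for the command class above
    def run(self):
        cc = new_compiler()
        customize_compiler(cc)                      # pick up CC/CFLAGS from sysconfig
        o_files = cc.compile(['githome/gh_client.c'])
        cc.link_executable(o_files, 'githome/gh_client')
        install.run(self)                           # then run the normal install

setup(name='githome', cmdclass={'install': InstallWithClient})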
Example #2
 def __init__(self):
     self.debug = False
     self._compiler = new_compiler()
     customize_compiler(self._compiler)
     self._build_ext = build_ext(Distribution())
     self._build_ext.finalize_options()
     self._py_lib_dirs = self._build_ext.library_dirs
Example #3
 def build_extensions(self):
     customize_compiler(self.compiler)
     try:
         self.compiler.compiler_so.remove("-Wstrict-prototypes")
     except (AttributeError, ValueError):
         pass
     build_ext.build_extensions(self)
Example #4
def get_preprocessor():
    # find out which compiler to use
    from distutils.sysconfig import customize_compiler

    compiler_name = distutils.ccompiler.get_default_compiler()
    compiler = distutils.ccompiler.new_compiler(compiler=compiler_name)
    try:
        customize_compiler(compiler)
    except AttributeError as e:
        print("Warning: failed customizing compiler ({:s})".format(repr(e)))

    if hasattr(compiler, "initialize"):
        try:
            compiler.initialize()
        except ValueError as e:
            print("Warning: failed initializing compiler ({:s})".format(repr(e)))

    cpp_cmd = None
    if hasattr(compiler, "preprocessor"):  # for unixccompiler
        cpp_cmd = compiler.preprocessor
    elif hasattr(compiler, "compiler"):  # for ccompiler
        cpp_cmd = compiler.compiler.split()
        cpp_cmd += ["-E"]
    elif hasattr(compiler, "cc"):  # for msvccompiler
        cpp_cmd = compiler.cc.split()
        cpp_cmd += ["-E"]

    if not cpp_cmd:
        print("Warning: could not guess preprocessor, using env's CC")
        cpp_cmd = os.environ.get("CC", "cc").split()
        cpp_cmd += ["-E"]

    return cpp_cmd
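
A hedged usage sketch for the helper above (the header path is a made-up example): the returned command list can be passed straight to subprocess to preprocess a file.

import subprocess

cpp_cmd = get_preprocessor()
# preprocess a hypothetical header and capture the expanded output
result = subprocess.run(cpp_cmd + ['some_header.h'],
                        capture_output=True, text=True, check=True)
print(result.stdout)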
Example #5
 def __init__(self, debug=False):
     self._compiler = new_compiler()
     log.set_threshold(log.DEBUG if debug else log.INFO)
     customize_compiler(self._compiler)
     self._build_ext = build_ext(Distribution())
     self._build_ext.finalize_options()
     self._py_lib_dirs = self._build_ext.library_dirs
Example #6
def CCompiler_customize(self, dist, need_cxx=0):
    """
    Do any platform-specific customization of a compiler instance.

    This method calls `distutils.sysconfig.customize_compiler` for
    platform-specific customization, as well as optionally remove a flag
    to suppress spurious warnings in case C++ code is being compiled.

    Parameters
    ----------
    dist : object
        This parameter is not used for anything.
    need_cxx : bool, optional
        Whether or not C++ has to be compiled. If so (True), the
        ``"-Wstrict-prototypes"`` option is removed to prevent spurious
        warnings. Default is False.

    Returns
    -------
    None

    Notes
    -----
    All the default options used by distutils can be extracted with::

      from distutils import sysconfig
      sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
                                'CCSHARED', 'LDSHARED', 'SO')

    """
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C.  Remove it if it's there to
        # avoid a spurious warning on every compilation.  All the default
        # options used by distutils can be extracted with:

        # from distutils import sysconfig
        # sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
        # 'CCSHARED', 'LDSHARED', 'SO')
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass

        if hasattr(self,'compiler') and 'cc' in self.compiler[0]:
            if not self.compiler_cxx:
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a,b)]\
                                    + self.compiler[1:]
        else:
            if hasattr(self,'compiler'):
                log.warn("#### %s #######" % (self.compiler,))
            log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
    return
Example #7
    def _setup_extensions(self):
        ext = {e.name: e for e in self.extensions}

        compiler = new_compiler(compiler=self.compiler)
        customize_compiler(compiler)

        def add_dependency(ext, name):
            add_ext_pkg_config_dep(ext, compiler.compiler_type, name)

        def add_pycairo(ext):
            ext.include_dirs += [get_pycairo_include_dir()]

        gi_ext = ext["gi._gi"]
        add_dependency(gi_ext, "glib-2.0")
        add_dependency(gi_ext, "gio-2.0")
        add_dependency(gi_ext, "gobject-introspection-1.0")
        add_dependency(gi_ext, "libffi")
        add_ext_compiler_flags(gi_ext, compiler)

        if WITH_CAIRO:
            gi_cairo_ext = ext["gi._gi_cairo"]
            add_dependency(gi_cairo_ext, "glib-2.0")
            add_dependency(gi_cairo_ext, "gio-2.0")
            add_dependency(gi_cairo_ext, "gobject-introspection-1.0")
            add_dependency(gi_cairo_ext, "libffi")
            add_dependency(gi_cairo_ext, "cairo")
            add_dependency(gi_cairo_ext, "cairo-gobject")
            add_pycairo(gi_cairo_ext)
            add_ext_compiler_flags(gi_cairo_ext, compiler)
Example #8
def get_cxxflags():
    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler
    from distutils import sysconfig
    from platform import system
    if system() == DARWIN_KEY:
        CXX_FLAGS["gcc"] = CXX_FLAGS["gcc-mac"]
        CXX_FLAGS["cc"] = CXX_FLAGS["clang"]
        CXX_FLAGS["c++"] = CXX_FLAGS["clang"]
    elif system() == LINUX_KEY:
        CXX_FLAGS["gcc"] = CXX_FLAGS["gcc-linux"]
        CXX_FLAGS["cc"] = CXX_FLAGS["gcc"]
        CXX_FLAGS["c++"] = CXX_FLAGS["gcc"]
    else:
        raise UnsupportedCompilerException("System: %s is not supported by HOPE" % system())
    
    sysconfig.get_config_vars() #init vars
    compiler = new_compiler()
    customize_compiler(compiler)
    compiler_name = compiler.compiler[0].split("/")[-1]
    
    _check_version(compiler_name, compiler.compiler[0])
    
    for name, flags in CXX_FLAGS.items():
        if compiler_name.startswith(name):
            return flags
    raise UnsupportedCompilerException("Unknown compiler: {0}".format(compiler_name))
Example #9
def check_sanity():
    """
    Test if development headers and library for rgw is available by compiling a dummy C program.
    """
    CEPH_SRC_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..',
        '..'
    )

    tmp_dir = tempfile.mkdtemp(dir=os.environ.get('TMPDIR', os.path.dirname(__file__)))
    tmp_file = os.path.join(tmp_dir, 'rgw_dummy.c')

    with open(tmp_file, 'w') as fp:
        dummy_prog = textwrap.dedent("""
        #include <stddef.h>
        #include "rados/rgw_file.h"

        int main(void) {
            rgwfile_version(NULL, NULL, NULL);
            return 0;
        }
        """)
        fp.write(dummy_prog)

    compiler = new_compiler()
    customize_compiler(compiler)

    if {'MAKEFLAGS', 'MFLAGS', 'MAKELEVEL'}.issubset(set(os.environ.keys())):
        # The setup.py has been invoked by a top-level Ceph make.
        # Set the appropriate CFLAGS and LDFLAGS

        compiler.set_include_dirs([os.path.join(CEPH_SRC_DIR, 'include')])
        compiler.set_library_dirs([os.environ.get('CEPH_LIBDIR')])

    try:
        compiler.define_macro('_FILE_OFFSET_BITS', '64')

        link_objects = compiler.compile(
            sources=[tmp_file],
            output_dir=tmp_dir,
        )

        compiler.link_executable(
            objects=link_objects,
            output_progname=os.path.join(tmp_dir, 'rgw_dummy'),
            libraries=['rgw', 'rados'],
            output_dir=tmp_dir,
        )

    except CompileError:
        print('\nCompile Error: RGW development headers not found', file=sys.stderr)
        return False
    except LinkError:
        print('\nLink Error: RGW library not found', file=sys.stderr)
        return False
    else:
        return True
    finally:
        shutil.rmtree(tmp_dir)
Example #10
def CCompiler_customize(self, dist, need_cxx=0):
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C.  Remove it if it's there to
        # avoid a spurious warning on every compilation.  All the default
        # options used by distutils can be extracted with:

        # from distutils import sysconfig
        # sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
        # 'CCSHARED', 'LDSHARED', 'SO')
        print "compiler options1:", self.compiler_so
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass
        print "compiler options2:", self.compiler_so
        if hasattr(self,'compiler') and self.compiler[0].find('cc')>=0:
            if not self.compiler_cxx:
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a,b)]\
                                    + self.compiler[1:]
        else:
            if hasattr(self,'compiler'):
                 log.warn("#### %s #######" % (self.compiler,))
            log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
    return
Example #11
File: setup.py Project: zjc5415/pyq
    def run(self):
        for exe in self.distribution.executables:
            exe.include_dirs.append(get_python_inc())
            compiler = new_compiler(  # compiler=self.compiler,
                verbose=self.verbose,
                dry_run=self.dry_run,
                force=self.force)
            customize_compiler(compiler)
            compiler.set_include_dirs(exe.include_dirs)
            for (name, value) in exe.define_macros:
                compiler.define_macro(name, value)

            objects = compiler.compile(exe.sources, output_dir=self.build_temp)

            # This is a hack copied from distutils.commands.build_exe (where it is also called
            # a hack).
            self._build_objects = objects[:]

            library_dirs = [os.path.join(sys.exec_prefix, 'lib')]

            exe_path = join(self.build_lib, exe.name.split('.')[-1])

            compiler.link(CCompiler.EXECUTABLE,
                          objects=objects,
                          output_filename=exe_path,
                          library_dirs=library_dirs,
                          libraries=exe.libraries
                          )
Example #12
    def run(self):
        from distutils.ccompiler import new_compiler
        if not self.extensions:
            return
        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
Example #13
def CCompiler_customize(self, dist, debug = False, need_cxx=0):
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C.  Remove it if it's there to
        # avoid a spurious warning on every compilation.  All the default
        # options used by distutils can be extracted with:

        # from distutils import sysconfig
        # sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
        # 'CCSHARED', 'LDSHARED', 'SO')
        saferemove(self.compiler_so,'-Wstrict-prototypes')
        
        if debug:
            saferemove(self.compiler_so,'-DNDEBUG')
            safereplace(self.compiler_so,['-O','-O1','-O2','-O3'],'-O0')
        else:
            saferemove(self.compiler_so,'-g')
            safereplace(self.compiler_so,['-O','-O0','-O1','-O2'],'-O3')

        if hasattr(self,'compiler') and 'cc' in self.compiler[0]:
            if not self.compiler_cxx:
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a,b)]\
                                    + self.compiler[1:]
        else:
            if hasattr(self,'compiler'):
                log.warn("#### %s #######" % (self.compiler,))
            log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
    return
Example #14
def customize_compiler(compiler, lang=None, mpicc=None, mpicxx=None, mpild=None):
    """ Implements the compiler configuration. """

    # Unix specific compilation customization
    assert compiler.compiler_type == 'unix'

    sysconfig.customize_compiler(compiler)

    ld = compiler.linker_exe
    for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'):
        if envvar in os.environ:
            ld += split_quoted(os.environ[envvar])

    # Compiler command overriding
    if mpicc:
        fix_compiler_cmd(compiler.compiler, mpicc)
        if lang in ('c', None):
            fix_compiler_cmd(compiler.compiler_so, mpicc)

    if mpicxx:
        fix_compiler_cmd(compiler.compiler_cxx, mpicxx)
        if lang == 'c++':
            fix_compiler_cmd(compiler.compiler_so, mpicxx)

    if mpild:
        for ld in [compiler.linker_so, compiler.linker_exe]:
            fix_linker_cmd(ld, mpild)

    badcxxflags = ['-Wimplicit', '-Wstrict-prototypes']
    for flag in badcxxflags:
        while flag in compiler.compiler_cxx:
            compiler.compiler_cxx.remove(flag)
        if lang == 'c++':
            while flag in compiler.compiler_so:
                compiler.compiler_so.remove(flag)
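
A minimal usage sketch for this helper (assumptions: the module also defines the fix_compiler_cmd/fix_linker_cmd helpers used above, and 'mpicc' is the MPI wrapper available on PATH):

from distutils import sysconfig
from distutils.ccompiler import new_compiler

sysconfig.get_config_vars()        # make sure the config vars are initialized
compiler = new_compiler()          # the helper asserts a 'unix' compiler type
customize_compiler(compiler, lang='c', mpicc='mpicc', mpild='mpicc')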
Example #15
def _have_sqlite_extension_support():
    import shutil
    import tempfile
    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler

    libraries = ['sqlite3']
    c_code = ('#include <sqlite3.h>\n\n'
              'int main(int argc, char **argv) { return 0; }')
    tmp_dir = tempfile.mkdtemp(prefix='tmp_pw_sqlite3_')
    bin_file = os.path.join(tmp_dir, 'test_pw_sqlite3')
    src_file = bin_file + '.c'
    with open(src_file, 'w') as fh:
        fh.write(c_code)

    compiler = new_compiler()
    customize_compiler(compiler)
    success = False
    try:
        compiler.link_executable(
            compiler.compile([src_file], output_dir=tmp_dir),
            bin_file,
            libraries=['sqlite3'])
    except CCompilerError:
        print('unable to compile sqlite3 C extensions - missing headers?')
    except DistutilsExecError:
        print('unable to compile sqlite3 C extensions - no c compiler?')
    except DistutilsPlatformError:
        print('unable to compile sqlite3 C extensions - platform error')
    else:
        success = True
    shutil.rmtree(tmp_dir)
    return success
Example #16
def test_compilation(cfile, compiler=None, **compiler_attrs):
    """Test simple compilation with given settings"""
    if compiler is None or isinstance(compiler, str):
        cc = ccompiler.new_compiler(compiler=compiler)
        customize_compiler(cc)
        if cc.compiler_type == 'mingw32':
            customize_mingw(cc)
    else:
        cc = compiler

    for name, val in compiler_attrs.items():
        setattr(cc, name, val)

    efile, ext = os.path.splitext(cfile)

    cpreargs = lpreargs = None
    if sys.platform == 'darwin':
        # use appropriate arch for compiler
        if platform.architecture()[0]=='32bit':
            if platform.processor() == 'powerpc':
                cpu = 'ppc'
            else:
                cpu = 'i386'
            cpreargs = ['-arch', cpu]
            lpreargs = ['-arch', cpu, '-undefined', 'dynamic_lookup']
        else:
            # allow for missing UB arch, since it will still work:
            lpreargs = ['-undefined', 'dynamic_lookup']
    extra = compiler_attrs.get('extra_compile_args', None)

    objs = cc.compile([cfile],extra_preargs=cpreargs, extra_postargs=extra)
    cc.link_executable(objs, efile, extra_preargs=lpreargs)
    return efile
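
A hedged usage sketch (the file name and attribute values are illustrative): extra keyword arguments become compiler attributes via setattr, and extra_compile_args is forwarded to compile().

# build a stand-alone check program with illustrative settings
efile = test_compilation('checks/feature.c',
                         include_dirs=['/usr/local/include'],
                         extra_compile_args=['-O0'])
print('built', efile)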
Example #17
    def setup_shlib_compiler(self):
        compiler = self.shlib_compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=self.force)
        if sys.platform == 'darwin':
            tmp = _config_vars.copy()
            try:
                _config_vars['LDSHARED'] = 'gcc -Wl,-x -dynamiclib -undefined dynamic_lookup'
                _config_vars['CCSHARED'] = ' -dynamiclib'
                _config_vars['SO'] = '.dylib'
                customize_compiler(compiler)
            finally:
                _config_vars.clear()
                _config_vars.update(tmp)

        else:
            customize_compiler(compiler)
        if self.include_dirs is not None:
            compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            for name, value in self.define:
                compiler.define_macro(name, value)

        if self.undef is not None:
            for macro in self.undef:
                compiler.undefine_macro(macro)

        if self.libraries is not None:
            compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            compiler.set_link_objects(self.link_objects)
        compiler.link_shared_object = link_shared_object.__get__(compiler)
        return
Example #18
    def test_run(self):
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        foo_c = os.path.join(pkg_dir, 'foo.c')
        self.write_file(foo_c, 'int main(void) { return 1;}\n')
        cmd.libraries = [('foo', {'sources': [foo_c]})]

        build_temp = os.path.join(pkg_dir, 'build')
        os.mkdir(build_temp)
        cmd.build_temp = build_temp
        cmd.build_clib = build_temp

        # before we run the command, we want to make sure
        # all commands are present on the system
        # by creating a compiler and checking its executables
        from distutils.ccompiler import new_compiler
        from distutils.sysconfig import customize_compiler

        compiler = new_compiler()
        customize_compiler(compiler)
        for ccmd in compiler.executables.values():
            if ccmd is None:
                continue
            if find_executable(ccmd[0]) is None:
                self.skipTest('The %r command is not found' % ccmd[0])

        # this should work
        cmd.run()

        # let's check the result
        self.assertIn('libfoo.a', os.listdir(build_temp))
Example #19
    def customize_compiler(self):
        # make sure AR gets caught
        class compiler:
            compiler_type = 'unix'

            def set_executables(self, **kw):
                self.exes = kw

        sysconfig_vars = {
            'AR': 'sc_ar',
            'CC': 'sc_cc',
            'CXX': 'sc_cxx',
            'ARFLAGS': '--sc-arflags',
            'CFLAGS': '--sc-cflags',
            'CCSHARED': '--sc-ccshared',
            'LDSHARED': 'sc_ldshared',
            'SHLIB_SUFFIX': 'sc_shutil_suffix',
        }

        comp = compiler()
        with contextlib.ExitStack() as cm:
            for key, value in sysconfig_vars.items():
                cm.enter_context(swap_item(sysconfig._config_vars, key, value))
            sysconfig.customize_compiler(comp)

        return comp
Example #20
    def run(self):
        from distutils.ccompiler import new_compiler
        if not self.extensions:
            return
        else:
            if self.distribution.has_c_libraries():
                build_clib = self.get_finalized_command('build_clib')
                self.libraries.extend(build_clib.get_library_names() or [])
                self.library_dirs.append(build_clib.build_clib)
            self.compiler = new_compiler(compiler=self.compiler, verbose=self.verbose, dry_run=self.dry_run, force=self.force)
            customize_compiler(self.compiler)
            if os.name == 'nt' and self.plat_name != get_platform():
                self.compiler.initialize(self.plat_name)
            if self.include_dirs is not None:
                self.compiler.set_include_dirs(self.include_dirs)
            if self.define is not None:
                for name, value in self.define:
                    self.compiler.define_macro(name, value)

            if self.undef is not None:
                for macro in self.undef:
                    self.compiler.undefine_macro(macro)

            if self.libraries is not None:
                self.compiler.set_libraries(self.libraries)
            if self.library_dirs is not None:
                self.compiler.set_library_dirs(self.library_dirs)
            if self.rpath is not None:
                self.compiler.set_runtime_library_dirs(self.rpath)
            if self.link_objects is not None:
                self.compiler.set_link_objects(self.link_objects)
            self.build_extensions()
            return
Example #21
def check_sanity():
    """
    Test if development headers and library for cephfs is available by compiling a dummy C program.
    """
    CEPH_SRC_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")

    tmp_dir = tempfile.mkdtemp(dir=os.environ.get("TMPDIR", os.path.dirname(__file__)))
    tmp_file = os.path.join(tmp_dir, "cephfs_dummy.c")

    with open(tmp_file, "w") as fp:
        dummy_prog = textwrap.dedent(
            """
        #include <stddef.h>
        #include "cephfs/libcephfs.h"

        int main(void) {
            struct ceph_mount_info *cmount = NULL;
            ceph_init(cmount);
            return 0;
        }
        """
        )
        fp.write(dummy_prog)

    compiler = new_compiler()
    customize_compiler(compiler)

    if {"MAKEFLAGS", "MFLAGS", "MAKELEVEL"}.issubset(set(os.environ.keys())):
        # The setup.py has been invoked by a top-level Ceph make.
        # Set the appropriate CFLAGS and LDFLAGS

        compiler.set_library_dirs([os.environ.get("CEPH_LIBDIR")])

    try:
        compiler.define_macro("_FILE_OFFSET_BITS", "64")

        link_objects = compiler.compile(
            sources=[tmp_file],
            output_dir=tmp_dir,
            extra_preargs=["-iquote{path}".format(path=os.path.join(CEPH_SRC_DIR, "include"))],
        )

        compiler.link_executable(
            objects=link_objects,
            output_progname=os.path.join(tmp_dir, "cephfs_dummy"),
            libraries=["cephfs", "rados"],
            output_dir=tmp_dir,
        )

    except CompileError:
        print("\nCompile Error: Ceph FS development headers not found", file=sys.stderr)
        return False
    except LinkError:
        print("\nLink Error: Ceph FS library not found", file=sys.stderr)
        return False
    else:
        return True
    finally:
        shutil.rmtree(tmp_dir)
Example #22
    def run(self):

        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.  See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name,value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
Example #23
def prepare_extra_c_file(info):
    c_file = info['filename']
    compile_args = info.get('compile_args', [])
    cc = new_compiler(verbose=3)
    customize_compiler(cc)
    [o_file] = cc.compile([c_file], '.',
                         extra_postargs=compile_args)
    return o_file
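
A short usage sketch (the path and flags are placeholders): the info mapping only needs a 'filename' key and, optionally, 'compile_args'.

obj = prepare_extra_c_file({'filename': 'src/helpers.c',        # hypothetical path
                            'compile_args': ['-O2', '-fPIC']})
print('object file:', obj)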
Example #24
    def build_libraries(self, libraries):
        customize_compiler(self.compiler)

        try:
            self.compiler.compiler_so.remove("-Wstrict-prototypes")
        except (AttributeError, ValueError):
            pass

        build_clib.build_libraries(self, libraries)
Example #25
 def build_extensions(self):
     # remove annoying flag which causes warning for c++ sources
     # https://stackoverflow.com/a/36293331/351771
     customize_compiler(self.compiler)
     try:
         self.compiler.compiler_so.remove("-Wstrict-prototypes")
     except (AttributeError, ValueError):
         pass
     distutils.command.build_ext.build_ext.build_extensions(self)
Example #26
def detect_zmq(basedir, compiler=None, **compiler_attrs):
    """Compile, link & execute a test program, in empty directory `basedir`.
    
    The C compiler will be updated with any keywords given via setattr.
    
    Parameters
    ----------
    
    basedir : path
        The location where the test program will be compiled and run
    compiler : str
        The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32')
    **compiler_attrs : dict
        Any extra compiler attributes, which will be set via ``setattr(cc)``.
    
    Returns
    -------
    
    A dict of properties for zmq compilation, with the following two keys:
    
    vers : tuple
        The ZMQ version as a tuple of ints, e.g. (2,2,0)
    settings : dict
        The compiler options used to compile the test function, e.g. `include_dirs`,
        `library_dirs`, `libs`, etc.
    """
    
    cfile = pjoin(basedir, 'vers.c')
    shutil.copy(pjoin(os.path.dirname(__file__), 'vers.c'), cfile)
    
    # check if we need to link against Realtime Extensions library
    if sys.platform.startswith('linux'):
        cc = ccompiler.new_compiler(compiler=compiler)
        customize_compiler(cc)
        cc.output_dir = basedir
        if not cc.has_function('timer_create'):
            compiler_attrs['libraries'].append('rt')
    
    cc = get_compiler(compiler=compiler, **compiler_attrs)
    efile = test_compilation(cfile, compiler=cc, **compiler_attrs)
    patch_lib_paths(efile, cc.library_dirs)
    
    rc, so, se = get_output_error([efile])
    if rc:
        msg = "Error running version detection script:\n%s\n%s" % (so,se)
        logging.error(msg)
        raise IOError(msg)

    handlers = {'vers':  lambda val: tuple(int(v) for v in val.split('.'))}

    props = {}
    for line in (x for x in so.split('\n') if x):
        key, val = line.split(':')
        props[key] = handlers[key](val)

    return props
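
A hedged usage sketch (directory and search paths are illustrative; get_compiler and test_compilation are assumed to come from the same build utilities):

import tempfile

basedir = tempfile.mkdtemp(prefix='zmq_detect_')
props = detect_zmq(basedir,
                   compiler=None,                        # use the default compiler
                   include_dirs=['/usr/local/include'],
                   library_dirs=['/usr/local/lib'],
                   libraries=['zmq'])
print('found libzmq', props['vers'])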
Example #27
 def _detect_sse41(self):
     "Does this compiler support SSE4.1 intrinsics?"
     compiler = new_compiler()
     customize_compiler(compiler)
     self._print_support_start('SSE4.1')
     result = self.hasfunction(compiler, '__m128 v; _mm_round_ps(v,0x00)',
                        include='<smmintrin.h>',
                        extra_postargs=['-msse4'])
     self._print_support_end('SSE4.1', result)
     return result
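
The snippet relies on a hasfunction helper that is not shown here. A plausible stand-in (an assumption, not the project's actual implementation, and shown as a plain function rather than the method used above) compiles a one-line program that calls the given expression and reports whether compilation succeeds:

import os
import shutil
import tempfile
from distutils.errors import CompileError

def hasfunction(compiler, funccall, include=None, extra_postargs=None):
    # hypothetical helper: True if `funccall` compiles against `include`
    tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
    try:
        fname = os.path.join(tmpdir, 'check.c')
        with open(fname, 'w') as f:
            if include is not None:
                f.write('#include %s\n' % include)
            f.write('int main(void) { %s; return 0; }\n' % funccall)
        try:
            compiler.compile([fname], output_dir=tmpdir,
                             extra_postargs=extra_postargs)
        except CompileError:
            return False
        return True
    finally:
        shutil.rmtree(tmpdir)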
Example #28
 def __init__(self, lang_ext="c"):
     ccompiler = new_compiler()
     customize_compiler(ccompiler)
     self.ccompiler = ccompiler
     self.macros = []
     self.include_dirs = []
     self.default_lang_ext = lang_ext
     self.header_availability = {}
     self.type_availability = {}
     self.decl_availability = {}
Example #29
 def build_extensions(self):
     customize_compiler(self.compiler)
     try:
         self.compiler.compiler_so.remove("-Wstrict-prototypes")
     except (AttributeError, ValueError):
         pass
     lto_flags=["-flto", "-flto-partition=none", "-fuse-linker-plugin",
                "-ffat-lto-objects"]
     self.compiler.compiler_so = [f for f in self.compiler.compiler_so if f not in lto_flags]
     build_ext.build_extensions(self)
Example #30
 def _detect_sse3(self):
     "Does this compiler support SSE3 intrinsics?"
     compiler = new_compiler()
     customize_compiler(compiler)
     self._print_support_start('SSE3')
     result = self.hasfunction(compiler, '__m128 v; _mm_hadd_ps(v,v)',
                        include='<pmmintrin.h>',
                        extra_postargs=['-msse3'])
     self._print_support_end('SSE3', result)
     return result
Example #31
 def setUp(self):
     self.compiler = ccompiler.new_compiler()
     sysconfig.customize_compiler(self.compiler)
     self.settings = build.get_compiler_setting()
Example #32
    def bundle_libzmq_extension(self):
        bundledir = "bundled"
        ext_modules = self.distribution.ext_modules
        if ext_modules and any(m.name == 'zmq.libzmq' for m in ext_modules):
            # I've already been run
            return

        line()
        info("Using bundled libzmq")

        # fetch sources for libzmq extension:
        if not os.path.exists(bundledir):
            os.makedirs(bundledir)

        fetch_libzmq(bundledir)

        stage_platform_hpp(pjoin(bundledir, 'zeromq'))

        sources = [pjoin('buildutils', 'initlibzmq.cpp')]
        sources += glob(pjoin(bundledir, 'zeromq', 'src', '*.cpp'))

        includes = [pjoin(bundledir, 'zeromq', 'include')]

        if bundled_version < (4, 2, 0):
            tweetnacl = pjoin(bundledir, 'zeromq', 'tweetnacl')
            tweetnacl_sources = glob(pjoin(tweetnacl, 'src', '*.c'))

            randombytes = pjoin(tweetnacl, 'contrib', 'randombytes')
            if sys.platform.startswith('win'):
                tweetnacl_sources.append(pjoin(randombytes, 'winrandom.c'))
            else:
                tweetnacl_sources.append(pjoin(randombytes, 'devurandom.c'))

            sources += tweetnacl_sources
            includes.append(pjoin(tweetnacl, 'src'))
            includes.append(randombytes)
        else:
            # >= 4.2
            sources += glob(pjoin(bundledir, 'zeromq', 'src', 'tweetnacl.c'))

        # construct the Extensions:
        libzmq = Extension(
            'zmq.libzmq',
            sources=sources,
            include_dirs=includes,
        )

        # register the extension:
        # doing this here means we must be run
        # before finalize_options in build_ext
        self.distribution.ext_modules.insert(0, libzmq)

        # use tweetnacl to provide CURVE support
        libzmq.define_macros.append(('ZMQ_HAVE_CURVE', 1))
        libzmq.define_macros.append(('ZMQ_USE_TWEETNACL', 1))

        # select polling subsystem based on platform
        if sys.platform == "darwin" or "bsd" in sys.platform:
            libzmq.define_macros.append(('ZMQ_USE_KQUEUE', 1))
            libzmq.define_macros.append(('ZMQ_IOTHREADS_USE_KQUEUE', 1))
            libzmq.define_macros.append(('ZMQ_POLL_BASED_ON_POLL', 1))
        elif 'linux' in sys.platform:
            libzmq.define_macros.append(('ZMQ_USE_EPOLL', 1))
            libzmq.define_macros.append(('ZMQ_IOTHREADS_USE_EPOLL', 1))
            libzmq.define_macros.append(('ZMQ_POLL_BASED_ON_POLL', 1))
        elif sys.platform.startswith('win'):
            libzmq.define_macros.append(('ZMQ_USE_SELECT', 1))
            libzmq.define_macros.append(('ZMQ_IOTHREADS_USE_SELECT', 1))
            libzmq.define_macros.append(('ZMQ_POLL_BASED_ON_SELECT', 1))
        else:
            # this may not be sufficiently precise
            libzmq.define_macros.append(('ZMQ_USE_POLL', 1))
            libzmq.define_macros.append(('ZMQ_IOTHREADS_USE_POLL', 1))
            libzmq.define_macros.append(('ZMQ_POLL_BASED_ON_POLL', 1))

        if sys.platform.startswith('win'):
            # include defines from zeromq msvc project:
            libzmq.define_macros.append(('FD_SETSIZE', 16384))
            libzmq.define_macros.append(('DLL_EXPORT', 1))
            libzmq.define_macros.append(('_CRT_SECURE_NO_WARNINGS', 1))

            # When compiling the C++ code inside of libzmq itself, we want to
            # avoid "warning C4530: C++ exception handler used, but unwind
            # semantics are not enabled. Specify /EHsc".
            if self.compiler_type == 'msvc':
                libzmq.extra_compile_args.append('/EHsc')
            elif self.compiler_type == 'mingw32':
                libzmq.define_macros.append(('ZMQ_HAVE_MINGW32', 1))

            # And things like sockets come from libraries that must be named.
            libzmq.libraries.extend(
                ['rpcrt4', 'ws2_32', 'advapi32', 'iphlpapi'])

            # bundle MSVCP redist
            if self.config['bundle_msvcp']:
                cc = new_compiler(compiler=self.compiler_type)
                cc.initialize()
                # get vc_redist location via private API
                try:
                    cc._vcruntime_redist
                except AttributeError:
                    # fatal error if env set, warn otherwise
                    msg = fatal if os.environ.get("PYZMQ_BUNDLE_CRT") else warn
                    msg("Failed to get cc._vcruntime via private API, not bundling CRT"
                        )
                if getattr(cc, "_vcruntime_redist", False):
                    redist_dir, dll = os.path.split(cc._vcruntime_redist)
                    to_bundle = [
                        pjoin(redist_dir, dll.replace('vcruntime', name))
                        for name in ('msvcp', 'concrt')
                    ]
                    for src in to_bundle:
                        dest = localpath('zmq', basename(src))
                        info("Copying %s -> %s" % (src, dest))
                        # copyfile to avoid permission issues
                        shutil.copyfile(src, dest)

        else:
            libzmq.include_dirs.append(bundledir)

            # check if we need to link against Realtime Extensions library
            cc = new_compiler(compiler=self.compiler_type)
            customize_compiler(cc)
            cc.output_dir = self.build_temp
            if not sys.platform.startswith(('darwin', 'freebsd')):
                line()
                info("checking for timer_create")
                if not cc.has_function('timer_create'):
                    info("no timer_create, linking librt")
                    libzmq.libraries.append('rt')
                else:
                    info("ok")

        # copy the header files to the source tree.
        bundledincludedir = pjoin('zmq', 'include')
        if not os.path.exists(bundledincludedir):
            os.makedirs(bundledincludedir)
        if not os.path.exists(pjoin(self.build_lib, bundledincludedir)):
            os.makedirs(pjoin(self.build_lib, bundledincludedir))

        for header in glob(pjoin(bundledir, 'zeromq', 'include', '*.h')):
            shutil.copyfile(header, pjoin(bundledincludedir, basename(header)))
            shutil.copyfile(
                header,
                pjoin(self.build_lib, bundledincludedir, basename(header)))

        # update other extensions, with bundled settings
        self.config['libzmq_extension'] = True
        self.init_settings_from_config()
        self.save_config('config', self.config)
Example #33
def mypycify(
    paths: List[str],
    mypy_options: Optional[List[str]] = None,
    *,
    verbose: bool = False,
    opt_level: str = '3',
    strip_asserts: bool = False,
    multi_file: bool = False,
    separate: Union[bool, List[Tuple[List[str], Optional[str]]]] = False,
    skip_cgen_input: Optional[Any] = None
) -> List[Extension]:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
        paths: A list of file paths to build. It may contain globs.
        mypy_options: Optionally, a list of command line flags to pass to mypy.
                      (This can also contain additional files, for compatibility reasons.)
        verbose: Should mypyc be more verbose. Defaults to false.

        opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
        strip_asserts: Should asserts be stripped from the generated code.

        multi_file: Should each Python module be compiled into its own C source file.
                    This can reduce compile time and memory requirements at the likely
                    cost of runtime performance of compiled code. Defaults to false.
        separate: Should compiled modules be placed in separate extension modules.
                  If False, all modules are placed in a single shared library.
                  If True, every module is placed in its own library.
                  Otherwise separate should be a list of
                  (file name list, optional shared library name) pairs specifying
                  groups of files that should be placed in the same shared library
                  (while all other modules will be placed in its own library).

                  Each group can be compiled independently, which can
                  speed up compilation, but calls between groups can
                  be slower than calls within a group and can't be
                  inlined.
    """

    setup_mypycify_vars()
    compiler_options = CompilerOptions(strip_asserts=strip_asserts,
                                       multi_file=multi_file, verbose=verbose)

    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler()  # type: Any
    sysconfig.customize_compiler(compiler)

    expanded_paths = []
    for path in paths:
        expanded_paths.extend(glob.glob(path))

    build_dir = 'build'  # TODO: can this be overridden??
    try:
        os.mkdir(build_dir)
    except FileExistsError:
        pass

    sources, options = get_mypy_config(expanded_paths, mypy_options)
    # We generate a shared lib if there are multiple modules or if any
    # of the modules are in a package. (Because I didn't want to fuss
    # around with making the single module code handle packages.)
    use_shared_lib = len(sources) > 1 or any('.' in x.module for x in sources)

    groups = construct_groups(sources, separate, use_shared_lib)

    # We let the test harness just pass in the c file contents instead
    # so that it can do a corner-cutting version without full stubs.
    if not skip_cgen_input:
        group_cfiles, ops_text = generate_c(sources, options, groups,
                                            compiler_options=compiler_options)
        # TODO: unique names?
        with open(os.path.join(build_dir, 'ops.txt'), 'w') as f:
            f.write(ops_text)
    else:
        group_cfiles = skip_cgen_input

    # Write out the generated C and collect the files for each group
    group_cfilenames = []  # type: List[Tuple[List[str], List[str]]]
    for cfiles in group_cfiles:
        cfilenames = []
        for cfile, ctext in cfiles:
            cfile = os.path.join(build_dir, cfile)
            write_file(cfile, ctext)
            if os.path.splitext(cfile)[1] == '.c':
                cfilenames.append(cfile)

        deps = [os.path.join(build_dir, dep) for dep in get_header_deps(cfiles)]
        group_cfilenames.append((cfilenames, deps))

    cflags = []  # type: List[str]
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level), '-Werror', '-Wno-unused-function', '-Wno-unused-label',
            '-Wno-unreachable-code', '-Wno-unused-variable', '-Wno-trigraphs',
            '-Wno-unused-command-line-argument', '-Wno-unknown-warning-option',
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]

    # Copy the runtime library in
    shared_cfilenames = []
    for name in ['CPy.c', 'getargs.c']:
        rt_file = os.path.join(build_dir, name)
        with open(os.path.join(include_dir(), name), encoding='utf-8') as f:
            write_file(rt_file, f.read())
        shared_cfilenames.append(rt_file)

    extensions = []
    for (group_sources, lib_name), (cfilenames, deps) in zip(groups, group_cfilenames):
        if use_shared_lib:
            assert lib_name
            extensions.extend(build_using_shared_lib(
                group_sources, lib_name, cfilenames + shared_cfilenames, deps, build_dir, cflags))
        else:
            extensions.extend(build_single_module(
                group_sources, cfilenames + shared_cfilenames, cflags))

    return extensions
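
A hedged setup.py sketch (project and module names are placeholders) showing how the returned Extension list is normally consumed:

from setuptools import setup
from mypyc.build import mypycify

setup(
    name='myproject',                                   # placeholder project name
    packages=['myproject'],
    ext_modules=mypycify(['myproject/lib.py', 'myproject/util.py']),
)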
Example #34
                        include_dirs = ["pyshapelib/shapelib"]),
              Extension("dbflibc",
                        ["pyshapelib/dbflib_wrap.c",
                         "pyshapelib/shapelib/dbfopen.c"],
                        include_dirs = ["pyshapelib/shapelib"],
                        define_macros = dbf_macros()) ]
else:
    print('will not install pyshapelib')

# Specify all the required mpl data
# create pyproj binary datum shift grid files.
pathout = os.path.join('lib', 'mpl_toolkits', 'basemap', 'data')
if sys.argv[1] != 'sdist':
    cc = ccompiler.new_compiler()
    sysconfig.customize_compiler(cc)
    cc.set_include_dirs(['src'])
    objects = cc.compile(['nad2bin.c'])
    execname = 'nad2bin'
    cc.link_executable(objects, execname)
    llafiles = glob.glob('datumgrid/*.lla')
    cmd = os.path.join(os.getcwd(), execname)
    for f in llafiles:
        fout = os.path.basename(f.split('.lla')[0])
        fout = os.path.join(pathout, fout)
        cmd_str = '%s %s < %s' % (cmd, fout, f)
        print('executing', cmd_str)
        subprocess.call(cmd_str, shell=True)
datafiles = glob.glob(os.path.join(pathout, '*'))
datafiles = [os.path.join('data', os.path.basename(f)) for f in datafiles]
package_data = {'mpl_toolkits.basemap': datafiles}
Example #35
def check_for_openmp():
    """Returns True if local setup supports OpenMP, False otherwise

    Code adapted from astropy_helpers, originally written by Tom
    Robitaille and Curtis McCully.
    """

    # Create a temporary directory
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    tmp_dir = tempfile.mkdtemp()
    start_dir = os.path.abspath(".")

    if os.name == "nt":
        # TODO: make this work with mingw
        # AFAICS there's no easy way to get the compiler distutils
        # will be using until compilation actually happens
        compile_flag = "-openmp"
        link_flag = ""
    else:
        compile_flag = "-fopenmp"
        link_flag = "-fopenmp"

    try:
        os.chdir(tmp_dir)

        with open("test_openmp.c", "w") as f:
            f.write(CCODE)

        os.mkdir("objects")

        # Compile, link, and run test program
        with stdchannel_redirected(sys.stderr, os.devnull):
            ccompiler.compile(
                ["test_openmp.c"], output_dir="objects", extra_postargs=[compile_flag]
            )
            ccompiler.link_executable(
                glob.glob(os.path.join("objects", "*")),
                "test_openmp",
                extra_postargs=[link_flag],
            )
            output = (
                subprocess.check_output("./test_openmp")
                .decode(sys.stdout.encoding or "utf-8")
                .splitlines()
            )

        if "nthreads=" in output[0]:
            nthreads = int(output[0].strip().split("=")[1])
            if len(output) == nthreads:
                using_openmp = True
            else:
                log.warn(
                    "Unexpected number of lines from output of test "
                    "OpenMP program (output was %s)",
                    output,
                )
                using_openmp = False
        else:
            log.warn(
                "Unexpected output from test OpenMP program (output was %s)", output
            )
            using_openmp = False

    except (CompileError, LinkError):
        using_openmp = False
    finally:
        os.chdir(start_dir)

    if using_openmp:
        log.warn("Using OpenMP to compile parallel extensions")
    else:
        log.warn(
            "Unable to compile OpenMP test program so Cython\n"
            "extensions will be compiled without parallel support"
        )

    return using_openmp
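
The function above references a CCODE string defined elsewhere in the module. A plausible stand-in (an assumption that matches the "nthreads=" parsing above: every OpenMP thread prints one line) would be:

CCODE = r"""
#include <omp.h>
#include <stdio.h>

int main(void) {
    #pragma omp parallel
    printf("nthreads=%d\n", omp_get_num_threads());
    return 0;
}
"""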
Example #36
def CCompiler_customize(self, dist, need_cxx=0):
    """
    Do any platform-specific customization of a compiler instance.

    This method calls `distutils.sysconfig.customize_compiler` for
    platform-specific customization, as well as optionally remove a flag
    to suppress spurious warnings in case C++ code is being compiled.

    Parameters
    ----------
    dist : object
        This parameter is not used for anything.
    need_cxx : bool, optional
        Whether or not C++ has to be compiled. If so (True), the
        ``"-Wstrict-prototypes"`` option is removed to prevent spurious
        warnings. Default is False.

    Returns
    -------
    None

    Notes
    -----
    All the default options used by distutils can be extracted with::

      from distutils import sysconfig
      sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
                                'CCSHARED', 'LDSHARED', 'SO')

    """
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C.  Remove it if it's there to
        # avoid a spurious warning on every compilation.
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass

        if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
            if not self.compiler_cxx:
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a, b)]\
                                    + self.compiler[1:]
        else:
            if hasattr(self, 'compiler'):
                log.warn("#### %s #######" % (self.compiler,))
            if not hasattr(self, 'compiler_cxx'):
                log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)


    # check if compiler supports gcc style automatic dependencies
    # run on every extension so skip for known good compilers
    if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
                                      'g++' in self.compiler[0] or
                                      'clang' in self.compiler[0]):
        self._auto_depends = True
    elif os.name == 'posix':
        import tempfile
        import shutil
        tmpdir = tempfile.mkdtemp()
        try:
            fn = os.path.join(tmpdir, "file.c")
            with open(fn, "w") as f:
                f.write("int a;\n")
            self.compile([fn], output_dir=tmpdir,
                         extra_preargs=['-MMD', '-MF', fn + '.d'])
            self._auto_depends = True
        except CompileError:
            self._auto_depends = False
        finally:
            shutil.rmtree(tmpdir)

    return
Example #37
    def finalize_options(self):
        _build.finalize_options(self)

        compiler = new_compiler(compiler=self.compiler, verbose=self.verbose)
        customize_compiler(compiler)

        disabled_libraries = []

        # Section for custom limits imposed on the SIMD instruction levels based
        # on the installed compiler
        plat_compiler = platform.python_compiler()
        if plat_compiler.lower().startswith('gcc'):
            # Check the installed gcc version, as versions older than 7.0 claim to
            # support avx512 but are missing some intrinsics that FastNoiseSIMD calls
            output = subprocess.check_output('gcc --version', shell=True)
            gcc_version = tuple([
                int(x)
                for x in re.findall(rb'\d+(?:\.\d+)+', output)[0].split(b'.')
            ])
            if gcc_version < (7, 2):  # Disable AVX512
                disabled_libraries.append('avx512')
            if gcc_version < (4, 7):  # Disable AVX2
                disabled_libraries.append('avx2')
        elif plat_compiler.lower().startswith('msc'):
            # No versions of Windows Python support AVX512 yet
            #                 MSVC++ 14.1 _MSC_VER == 1911 (Visual Studio 2017)
            #                 MSVC++ 14.1 _MSC_VER == 1910 (Visual Studio 2017)
            # Python 3.5/3.6: MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015)
            # Python 3.4:     MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010)
            # Python 2.7:     MSVC++ 9.0  _MSC_VER == 1500 (Visual Studio 2008)
            # Here we just assume the user has the platform compiler
            msc_version = int(
                re.findall(r'v\.\d+', plat_compiler)[0].lstrip('v.'))
            # print('FOUND MSVC VERSION: ', msc_version)
            # Still not working with MSVC2017 yet with 1915 and Python 3.7, it
            # cannot find the function `_mm512_floor_ps`
            if msc_version < 1916:
                disabled_libraries.append('avx512')
            if msc_version < 1900:
                disabled_libraries.append('avx2')
        # End of SIMD limits

        for name, lib in self.distribution.libraries:
            val = getattr(self, 'with_' + name)
            if val not in ('auto', 'yes', 'no'):
                raise DistutilsOptionError('with_%s flag must be auto, yes, '
                                           'or no, not "%s".' % (name, val))

            if val == 'no':
                disabled_libraries.append(name)
                continue

            if not self.compiler_has_flags(compiler, name, lib['cflags']):
                if val == 'yes':
                    # Explicitly required but not available.
                    raise CCompilerError('%s is not supported by your '
                                         'compiler.' % (name, ))
                disabled_libraries.append(name)

        use_fma = False
        if (self.with_fma != 'no' and ('avx512' not in disabled_libraries
                                       or 'avx2' not in disabled_libraries)):
            if fma_flags is None:
                # No flags required.
                use_fma = True
            elif self.compiler_has_flags(compiler, 'fma', fma_flags):
                use_fma = True
                avx512['cflags'] += fma_flags
                avx2['cflags'] += fma_flags
            elif self.with_fma == 'yes':
                # Explicitly required but not available.
                raise CCompilerError('FMA is not supported by your compiler.')

        self.distribution.libraries = [
            lib for lib in self.distribution.libraries
            if lib[0] not in disabled_libraries
        ]

        with open('pyfastnoisesimd/fastnoisesimd/x86_flags.h', 'wb') as fh:
            fh.write(b'// This file is generated by setup.py, '
                     b'do not edit it by hand\n')
            for name, lib in self.distribution.libraries:
                fh.write(b'#define FN_COMPILE_%b\n' %
                         (name.upper().encode('ascii', )))
            if use_fma:
                fh.write(b'#define FN_USE_FMA\n')
Example #38
def mypycify(
        paths: List[str],
        *,
        only_compile_paths: Optional[Iterable[str]] = None,
        verbose: bool = False,
        opt_level: str = "3",
        debug_level: str = "1",
        strip_asserts: bool = False,
        multi_file: bool = False,
        separate: Union[bool, List[Tuple[List[str], Optional[str]]]] = False,
        skip_cgen_input: Optional[Any] = None,
        target_dir: Optional[str] = None,
        include_runtime_files: Optional[bool] = None) -> List['Extension']:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
        paths: A list of file paths to build. It may also contain mypy options.
        only_compile_paths: If not None, an iterable of paths that are to be
                            the only modules compiled, even if other modules
                            appear in the mypy command line given to paths.
                            (These modules must still be passed to paths.)

        verbose: Should mypyc be more verbose. Defaults to false.

        opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
        debug_level: The debug level, as a string. Defaults to '1' (meaning '-g1').
        strip_asserts: Should asserts be stripped from the generated code.

        multi_file: Should each Python module be compiled into its own C source file.
                    This can reduce compile time and memory requirements at the likely
                    cost of runtime performance of compiled code. Defaults to false.
        separate: Should compiled modules be placed in separate extension modules.
                  If False, all modules are placed in a single shared library.
                  If True, every module is placed in its own library.
                  Otherwise separate should be a list of
                  (file name list, optional shared library name) pairs specifying
                  groups of files that should be placed in the same shared library
                  (while every other module will be placed in its own library).

                  Each group can be compiled independently, which can
                  speed up compilation, but calls between groups can
                  be slower than calls within a group and can't be
                  inlined.
        target_dir: The directory to write C output files. Defaults to 'build'.
        include_runtime_files: If not None, whether the mypyc runtime library
                               should be directly #include'd instead of linked
                               separately in order to reduce compiler invocations.
                               Defaults to False in multi_file mode, True otherwise.
    """

    # Figure out our configuration
    compiler_options = CompilerOptions(
        strip_asserts=strip_asserts,
        multi_file=multi_file,
        verbose=verbose,
        separate=separate is not False,
        target_dir=target_dir,
        include_runtime_files=include_runtime_files,
    )

    # Generate all the actual important C code
    groups, group_cfilenames = mypyc_build(
        paths,
        only_compile_paths=only_compile_paths,
        compiler_options=compiler_options,
        separate=separate,
        skip_cgen_input=skip_cgen_input,
    )

    # Mess around with setuptools and actually get the thing built
    setup_mypycify_vars()

    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler: Any = ccompiler.new_compiler()
    sysconfig.customize_compiler(compiler)

    build_dir = compiler_options.target_dir

    cflags: List[str] = []
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level),
            '-g{}'.format(debug_level),
            '-Werror',
            '-Wno-unused-function',
            '-Wno-unused-label',
            '-Wno-unreachable-code',
            '-Wno-unused-variable',
            '-Wno-unused-command-line-argument',
            '-Wno-unknown-warning-option',
        ]
        if 'gcc' in compiler.compiler[0] or 'gnu-cc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]

    # If configured to (defaults to yes in multi-file mode), copy the
    # runtime library in. Otherwise it just gets #included to save on
    # compiler invocations.
    shared_cfilenames = []
    if not compiler_options.include_runtime_files:
        for name in RUNTIME_C_FILES:
            rt_file = os.path.join(build_dir, name)
            with open(os.path.join(include_dir(), name),
                      encoding='utf-8') as f:
                write_file(rt_file, f.read())
            shared_cfilenames.append(rt_file)

    extensions = []
    for (group_sources, lib_name), (cfilenames,
                                    deps) in zip(groups, group_cfilenames):
        if lib_name:
            extensions.extend(
                build_using_shared_lib(group_sources, lib_name,
                                       cfilenames + shared_cfilenames, deps,
                                       build_dir, cflags))
        else:
            extensions.extend(
                build_single_module(group_sources,
                                    cfilenames + shared_cfilenames, cflags))

    return extensions
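
As the docstring notes, the returned Extension list is meant to be passed straight to setuptools. A minimal sketch of a setup.py using it follows; the project and module names are placeholders, not taken from any real project.

from setuptools import setup
from mypyc.build import mypycify

setup(
    name='mylib',                       # placeholder project name
    ext_modules=mypycify(['mylib.py'], opt_level='3', multi_file=False),
)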
Ejemplo n.º 39
0
def detect_zmq(basedir, compiler=None, **compiler_attrs):
    """Compile, link & execute a test program, in empty directory `basedir`.

    The C compiler will be updated with any keywords given via setattr.

    Parameters
    ----------

    basedir : path
        The location where the test program will be compiled and run
    compiler : str
        The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32')
    **compiler_attrs : dict
        Any extra compiler attributes, which will be set on the compiler via ``setattr``.

    Returns
    -------

    A dict of properties for zmq compilation, with the following two keys:

    vers : tuple
        The ZMQ version as a tuple of ints, e.g. (2,2,0)
    settings : dict
        The compiler options used to compile the test function, e.g. `include_dirs`,
        `library_dirs`, `libs`, etc.
    """

    cfile = pjoin(basedir, 'vers.c')
    shutil.copy(pjoin(os.path.dirname(__file__), 'vers.c'), cfile)

    # check if we need to link against Realtime Extensions library
    if sys.platform.startswith('linux'):
        cc = ccompiler.new_compiler(compiler=compiler)
        customize_compiler(cc)
        cc.output_dir = basedir
        info("Checking for timer_create")
        info(
            "** Errors about missing timer_create are a normal part of this process **"
        )
        if not cc.has_function('timer_create'):
            compiler_attrs['libraries'].append('rt')
            info(
                "** The above error about timer_create is normal and not a problem! **"
            )
            info("no timer_create, linking librt")

    cc = get_compiler(compiler=compiler, **compiler_attrs)
    efile = test_compilation(cfile, compiler=cc, **compiler_attrs)
    patch_lib_paths(efile, cc.library_dirs)

    # add library dirs to %PATH% for windows
    env = os.environ.copy()
    if sys.platform.startswith("win"):
        env["PATH"] = os.pathsep.join([env["PATH"]] + cc.library_dirs)

    rc, so, se = get_output_error([efile], env=env)
    if rc:
        msg = "Error running version detection script:\n%s\n%s" % (so, se)
        logging.error(msg)
        raise IOError(msg)

    handlers = {'vers': lambda val: tuple(int(v) for v in val.split('.'))}

    props = {}
    for line in (x for x in so.split('\n') if x):
        key, val = line.split(':')
        props[key] = handlers[key](val)

    return props
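
A hedged example of driving detect_zmq() from a build script; the prefix paths are placeholders, and per the docstring the extra keyword arguments simply become attributes on the compiler object.

import tempfile

props = detect_zmq(
    tempfile.mkdtemp(),                    # empty scratch directory for the test program
    compiler=None,                         # let distutils pick the default compiler
    include_dirs=['/usr/local/include'],   # placeholder libzmq prefix
    library_dirs=['/usr/local/lib'],
    libraries=['zmq'],
)
print('found libzmq %s' % '.'.join(str(v) for v in props['vers']))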
Ejemplo n.º 40
0
def check_for_openmp():
    """Returns True if local setup supports OpenMP, False otherwise
    Code adapted from astropy_helpers, originally written by Tom 
    Robitaille and Curtis McCully.
    """

    # See https://bugs.python.org/issue25150
    if sys.version_info[:3] == (3, 5, 0):
        return False

    # Create a temporary directory
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    tmp_dir = tempfile.mkdtemp()
    start_dir = os.path.abspath('.')

    if os.name == 'nt':
        # TODO: make this work with mingw
        # AFAICS there's no easy way to get the compiler distutils
        # will be using until compilation actually happens
        compile_flag = '-openmp'
        link_flag = ''
    else:
        compile_flag = '-fopenmp'
        link_flag = '-fopenmp'

    try:
        os.chdir(tmp_dir)

        with open('test_openmp.c', 'w') as f:
            f.write(CCODE)

        os.mkdir('objects')

        # Compile, link, and run test program
        with stdchannel_redirected(sys.stderr, os.devnull):
            ccompiler.compile(['test_openmp.c'], output_dir='objects',
                              extra_postargs=[compile_flag])
            ccompiler.link_executable(
                glob.glob(os.path.join('objects', '*')), 'test_openmp',
                extra_postargs=[link_flag])
            output = subprocess.check_output('./test_openmp').decode(
                sys.stdout.encoding or 'utf-8').splitlines()

        if 'nthreads=' in output[0]:
            nthreads = int(output[0].strip().split('=')[1])
            if len(output) == nthreads:
                using_openmp = True
            else:
                log.warn("Unexpected number of lines from output of test "
                         "OpenMP program (output was {0})".format(output))
                using_openmp = False
        else:
            log.warn("Unexpected output from test OpenMP "
                     "program (output was {0})".format(output))
            using_openmp = False

    except (CompileError, LinkError):
        using_openmp = False
    finally:
        os.chdir(start_dir)

    if using_openmp:
        log.warn("Using OpenMP to compile parallel extensions")
    else:
        log.warn("Unable to compile OpenMP test program so Cython\n"
                 "extensions will be compiled without parallel support")

    return using_openmp
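
Since this variant only reports a boolean, the setup script still has to choose the flags itself. Here is a sketch of gating an extension on the result; the extension name and the -fopenmp flag choice are assumptions consistent with the test above.

from setuptools import Extension

omp_args = ['-fopenmp'] if check_for_openmp() else []
ext = Extension(
    'mypkg._parallel',                 # placeholder extension name
    sources=['mypkg/_parallel.c'],
    extra_compile_args=omp_args,
    extra_link_args=omp_args,
)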
Ejemplo n.º 41
0
    def run(self):
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.  See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
Ejemplo n.º 42
0
    def get_flags(self, cc=None):
        """
        Return a two-element tuple of CFLAGS and LDFLAGS for the compiler to use for
        JIT code generation.
        """

        cflags, ldflags = [], []

        # NumPy
        cflags.append('-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION')

        # FFTW3
        try:
            subprocess.check_output(['pkg-config', 'fftw3f', '--exists'])
            flags = subprocess.check_output(
                ['pkg-config', 'fftw3f', '--cflags'])
            try:
                flags = flags.decode()
            except AttributeError:
                # Python2 catch
                pass
            cflags.extend(flags.split())
            flags = subprocess.check_output(['pkg-config', 'fftw3f', '--libs'])
            try:
                flags = flags.decode()
            except AttributeError:
                # Python2 catch
                pass
            ldflags.extend(flags.split())
        except subprocess.CalledProcessError:
            cflags.extend([])
            ldflags.extend(['-lfftw3f', '-lm'])

        # OpenMP
        from distutils import sysconfig
        from distutils import ccompiler
        compiler = ccompiler.new_compiler()
        sysconfig.get_config_vars()
        sysconfig.customize_compiler(compiler)
        cc = compiler.compiler

        with TempBuildDir():
            with open('openmp_test.c', 'w') as fh:
                fh.write(r"""#include <omp.h>
#include <stdio.h>
int main(void) {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
return 0;
}
            """)
            try:
                call = []
                call.extend(cc)
                call.extend(cflags)
                call.extend([
                    '-fopenmp', 'openmp_test.c', '-o', 'openmp_test', '-lgomp'
                ])
                output = subprocess.check_output(call,
                                                 stderr=subprocess.STDOUT)
                cflags.append('-fopenmp')
                ldflags.append('-lgomp')
                os.unlink('openmp_test')
            except subprocess.CalledProcessError:
                pass

        # Other
        cflags.append('-O2')

        return cflags, ldflags
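
A sketch of how the returned flag lists might be consumed when JIT-compiling a kernel into a shared library; `builder` (the object providing get_flags()) and the source file name are hypothetical.

import subprocess

cflags, ldflags = builder.get_flags()
cmd = ['cc', '-shared', '-fPIC'] + cflags + ['kernel.c', '-o', 'kernel.so'] + ldflags
subprocess.check_call(cmd)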
Ejemplo n.º 43
0
    def initialize(self):
        if self._is_initialized:
            return

        cc = new_compiler()
        customize_compiler(cc)

        self.msvc = cc.compiler_type == 'msvc'
        self._print_compiler_version(cc)

        if self.disable_openmp:
            self.openmp_enabled = False
        else:
            self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
        self.sse3_enabled = self._detect_sse3() if not self.msvc else True
        self.sse41_enabled = self._detect_sse41() if not self.msvc else True
        self.neon_enabled = self._detect_neon() if not self.msvc else False

        # -DNO_WARN_X86_INTRINSICS is only compatible with gcc >= 8.0 and IBM Advanced Toolchain >= 11.0.
        # It is a direct conversion that allows x86 SSE calls to be ported to OpenPOWER vector intrinsics.
        # reference: https://developer.ibm.com/tutorials/migrate-app-on-lop/?_ga=2.38728486.485083667.1620858815-1927233392.1620858815&cm_mc_uid=79453381708616208588147&cm_mc_sid_50200000=72302701620930882541
        self.compiler_args_sse2 = [
            '-DNO_WARN_X86_INTRINSICS', '-fsigned-char', '-msse2'
        ] if not self.msvc else ['/arch:SSE2']
        self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled
                                                  and not self.msvc) else []
        self.compiler_args_neon = []
        self.compiler_args_warn = [
            '-Wno-unused-function', '-Wno-unreachable-code',
            '-Wno-sign-compare'
        ] if not self.msvc else []

        if self.neon_enabled:
            self.compiler_args_sse2 = []
            self.compiler_args_sse3 = []

        self.compiler_args_sse41, self.define_macros_sse41 = [], []
        if self.sse41_enabled:
            self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
            if not self.msvc:
                self.compiler_args_sse41 = ['-msse4']

        if self.openmp_enabled:
            self.compiler_libraries_openmp = []

            if self.msvc:
                self.compiler_args_openmp = ['/openmp']
            else:
                self.compiler_args_openmp = ['-fopenmp']
                if openmp_needs_gomp:
                    self.compiler_libraries_openmp = ['gomp']
        else:
            self.compiler_libraries_openmp = []
            self.compiler_args_openmp = []

        if self.msvc:
            self.compiler_args_opt = ['/O2']
        else:
            self.compiler_args_opt = ['-O3', '-funroll-loops', '--std=c++11']
        print()
        self._is_initialized = True
Ejemplo n.º 44
0
def customize_compiler(compiler, lang=None,
                       mpicc=None, mpicxx=None, mpild=None,
                       ):
    sysconfig.customize_compiler(compiler)
    if compiler.compiler_type == 'unix':
        ld = compiler.linker_exe
        for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'):
            if envvar in os.environ:
                ld += shlex.split(os.environ[envvar])
    if sys.platform == 'darwin':
        badcflags = ['-mno-fused-madd']
        for attr in (
            'preprocessor',
            'compiler', 'compiler_cxx', 'compiler_so',
            'linker_so', 'linker_exe',
        ):
            compiler_cmd = getattr(compiler, attr, None)
            if compiler_cmd is None: continue
            for flag in badcflags:
                while flag in compiler_cmd:
                    compiler_cmd.remove(flag)
    if compiler.compiler_type == 'unix':
        # Compiler command overriding
        if mpicc:
            fix_compiler_cmd(compiler.compiler, mpicc)
            if lang in ('c', None):
                fix_compiler_cmd(compiler.compiler_so, mpicc)
        if mpicxx:
            fix_compiler_cmd(compiler.compiler_cxx, mpicxx)
            if lang == 'c++':
                fix_compiler_cmd(compiler.compiler_so, mpicxx)
        if mpild:
            for ld in [compiler.linker_so, compiler.linker_exe]:
                fix_linker_cmd(ld, mpild)
    if compiler.compiler_type == 'cygwin':
        compiler.set_executables(
            preprocessor = 'gcc -mcygwin -E',
            )
    if compiler.compiler_type == 'mingw32':
        compiler.set_executables(
            preprocessor = 'gcc -mno-cygwin -E',
            )
    if compiler.compiler_type in ('unix', 'cygwin', 'mingw32'):
        badcxxflags = [ '-Wimplicit', '-Wstrict-prototypes']
        for flag in badcxxflags:
            while flag in compiler.compiler_cxx:
                compiler.compiler_cxx.remove(flag)
            if lang == 'c++':
                while flag in compiler.compiler_so:
                    compiler.compiler_so.remove(flag)
    if compiler.compiler_type == 'mingw32':
        # Remove msvcrXX.dll
        del compiler.dll_libraries[:]
        # https://bugs.python.org/issue12641
        if compiler.gcc_version >= '4.4':
            for attr in (
                'preprocessor',
                'compiler', 'compiler_cxx', 'compiler_so',
                'linker_so', 'linker_exe',
            ):
                try: getattr(compiler, attr).remove('-mno-cygwin')
                except: pass
        # Add required define and compiler flags for AMD64
        if platform.architecture()[0] == '64bit':
            for attr in (
                'preprocessor',
                'compiler', 'compiler_cxx', 'compiler_so',
                'linker_so', 'linker_exe',
            ):
                getattr(compiler, attr).insert(1, '-DMS_WIN64')
                getattr(compiler, attr).insert(1, '-m64')
    if compiler.compiler_type == 'msvc':
        if not compiler.initialized: compiler.initialize()
        compiler.ldflags_shared.append('/MANIFEST')
        compiler.ldflags_shared_debug.append('/MANIFEST')
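
A hedged usage sketch: wrapping a fresh distutils compiler so that MPI wrapper commands are used for compiling and linking. It assumes `mpicc` is on PATH and that `fix_compiler_cmd`/`fix_linker_cmd` are defined elsewhere in the same module, as the function above requires.

from distutils.ccompiler import new_compiler

cc = new_compiler()
customize_compiler(cc, lang='c', mpicc='mpicc', mpild='mpicc')
objects = cc.compile(['hello_mpi.c'])          # hello_mpi.c is a placeholder source file
cc.link_executable(objects, 'hello_mpi')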
Ejemplo n.º 45
0
    def run(self):
        cmd = self.reinitialize_command("build_ext")
        cmd.inplace = True
        cmd.force = self.force
        cmd.ensure_finalized()
        cmd.run()

        gidatadir = pkg_config_parse(
            "--variable=gidatadir", "gobject-introspection-1.0")[0]
        g_ir_scanner = pkg_config_parse(
            "--variable=g_ir_scanner", "gobject-introspection-1.0")[0]
        g_ir_compiler = pkg_config_parse(
            "--variable=g_ir_compiler", "gobject-introspection-1.0")[0]

        script_dir = get_script_dir()
        gi_dir = os.path.join(script_dir, "gi")
        tests_dir = os.path.join(script_dir, "tests")
        gi_tests_dir = os.path.join(gidatadir, "tests")

        schema_xml = os.path.join(tests_dir, "org.gnome.test.gschema.xml")
        schema_bin = os.path.join(tests_dir, "gschemas.compiled")
        if self._newer_group([schema_xml], schema_bin):
            subprocess.check_call([
                "glib-compile-schemas",
                "--targetdir=%s" % tests_dir,
                "--schema-file=%s" % schema_xml,
            ])

        compiler = new_compiler()
        customize_compiler(compiler)

        if os.name == "nt":
            compiler.shared_lib_extension = ".dll"
        elif sys.platform == "darwin":
            compiler.shared_lib_extension = ".dylib"
            if "-bundle" in compiler.linker_so:
                compiler.linker_so = list(compiler.linker_so)
                i = compiler.linker_so.index("-bundle")
                compiler.linker_so[i] = "-dynamiclib"
        else:
            compiler.shared_lib_extension = ".so"

        if compiler.compiler_type == "msvc":
            g_ir_scanner_cmd = [sys.executable, g_ir_scanner]
        else:
            g_ir_scanner_cmd = [g_ir_scanner]

        def build_ext(ext):

            libname = compiler.shared_object_filename(ext.name)
            ext_paths = [os.path.join(tests_dir, libname)]
            if os.name == "nt":
                if compiler.compiler_type == "msvc":
                    # MSVC: Get rid of the 'lib' prefix and the .dll
                    #       suffix from libname, and append .lib so
                    #       that we get the right .lib filename to
                    #       pass to g-ir-scanner with --library
                    implibname = libname[3:libname.rfind(".dll")] + '.lib'
                else:
                    implibname = libname + ".a"
                ext_paths.append(os.path.join(tests_dir, implibname))

            if self._newer_group(ext.sources + ext.depends, *ext_paths):
                # MSVC: We need to define _GI_EXTERN explicitly so that
                #       symbols get exported properly
                if compiler.compiler_type == "msvc":
                    extra_defines = [('_GI_EXTERN',
                                      '__declspec(dllexport)extern')]
                else:
                    extra_defines = []
                objects = compiler.compile(
                    ext.sources,
                    output_dir=self.build_temp,
                    include_dirs=ext.include_dirs,
                    macros=ext.define_macros + extra_defines)

                if os.name == "nt":
                    if compiler.compiler_type == "msvc":
                        postargs = ["-implib:%s" %
                                    os.path.join(tests_dir, implibname)]
                    else:
                        postargs = ["-Wl,--out-implib=%s" %
                                    os.path.join(tests_dir, implibname)]
                else:
                    postargs = []

                compiler.link_shared_object(
                    objects,
                    compiler.shared_object_filename(ext.name),
                    output_dir=tests_dir,
                    libraries=ext.libraries,
                    library_dirs=ext.library_dirs,
                    extra_postargs=postargs)

            return ext_paths

        ext = Extension(
            name='libgimarshallingtests',
            sources=[
                os.path.join(gi_tests_dir, "gimarshallingtests.c"),
                os.path.join(tests_dir, "gimarshallingtestsextra.c"),
            ],
            include_dirs=[
                gi_tests_dir,
                tests_dir,
            ],
            depends=[
                os.path.join(gi_tests_dir, "gimarshallingtests.h"),
                os.path.join(tests_dir, "gimarshallingtestsextra.h"),
            ],
        )
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
        ext_paths = build_ext(ext)

        # We want to always use POSIX-style paths for g-ir-compiler
        # because it expects the input .gir file and .typelib file to use
        # POSIX-style paths, otherwise it fails
        gir_path = posixpath.join(
            tests_dir, "GIMarshallingTests-1.0.gir")
        typelib_path = posixpath.join(
            tests_dir, "GIMarshallingTests-1.0.typelib")

        gimarshal_g_ir_scanner_cmd = g_ir_scanner_cmd + [
            "--no-libtool",
            "--include=Gio-2.0",
            "--namespace=GIMarshallingTests",
            "--nsversion=1.0",
            "--symbol-prefix=gi_marshalling_tests",
            "--warn-all",
            "--warn-error",
            "--library-path=%s" % tests_dir,
            "--library=gimarshallingtests",
            "--pkg=glib-2.0",
            "--pkg=gio-2.0",
            "--cflags-begin",
            "-I%s" % gi_tests_dir,
            "--cflags-end",
            "--output=%s" % gir_path,
        ]

        if self._newer_group(ext_paths, gir_path):
            subprocess.check_call(gimarshal_g_ir_scanner_cmd +
                                  ext.sources + ext.depends)

        if self._newer_group([gir_path], typelib_path):
            subprocess.check_call([
                g_ir_compiler,
                gir_path,
                "--output=%s" % typelib_path,
            ])

        regress_macros = []
        if not WITH_CAIRO:
            regress_macros.append(("_GI_DISABLE_CAIRO", "1"))

        ext = Extension(
            name='libregress',
            sources=[
                os.path.join(gi_tests_dir, "regress.c"),
                os.path.join(tests_dir, "regressextra.c"),
            ],
            include_dirs=[
                gi_tests_dir,
            ],
            depends=[
                os.path.join(gi_tests_dir, "regress.h"),
                os.path.join(tests_dir, "regressextra.h"),
            ],
            define_macros=regress_macros,
        )
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
        if WITH_CAIRO:
            add_ext_pkg_config_dep(ext, compiler.compiler_type, "cairo")
            add_ext_pkg_config_dep(
                ext, compiler.compiler_type, "cairo-gobject")
        ext_paths = build_ext(ext)

        # We want to always use POSIX-style paths for g-ir-compiler
        # because it expects the input .gir file and .typelib file to use
        # POSIX-style paths, otherwise it fails
        gir_path = posixpath.join(tests_dir, "Regress-1.0.gir")
        typelib_path = posixpath.join(tests_dir, "Regress-1.0.typelib")
        regress_g_ir_scanner_cmd = g_ir_scanner_cmd + [
            "--no-libtool",
            "--include=Gio-2.0",
            "--namespace=Regress",
            "--nsversion=1.0",
            "--warn-all",
            "--warn-error",
            "--library-path=%s" % tests_dir,
            "--library=regress",
            "--pkg=glib-2.0",
            "--pkg=gio-2.0"]

        if self._newer_group(ext_paths, gir_path):
            if WITH_CAIRO:
                regress_g_ir_scanner_cmd += ["--include=cairo-1.0"]
                # MSVC: We don't normally have the pkg-config files for
                # cairo and cairo-gobject, so use --extra-library
                # instead of --pkg to pass those to the linker, so that
                # g-ir-scanner won't fail due to linker errors
                if compiler.compiler_type == "msvc":
                    regress_g_ir_scanner_cmd += [
                        "--extra-library=cairo",
                        "--extra-library=cairo-gobject"]

                else:
                    regress_g_ir_scanner_cmd += [
                        "--pkg=cairo",
                        "--pkg=cairo-gobject"]
            else:
                regress_g_ir_scanner_cmd += ["-D_GI_DISABLE_CAIRO"]

            regress_g_ir_scanner_cmd += ["--output=%s" % gir_path]

            subprocess.check_call(regress_g_ir_scanner_cmd +
                                  ext.sources + ext.depends)

        if self._newer_group([gir_path], typelib_path):
            subprocess.check_call([
                g_ir_compiler,
                gir_path,
                "--output=%s" % typelib_path,
            ])

        ext = Extension(
            name='tests.testhelper',
            sources=[
                os.path.join(tests_dir, "testhelpermodule.c"),
                os.path.join(tests_dir, "test-floating.c"),
                os.path.join(tests_dir, "test-thread.c"),
                os.path.join(tests_dir, "test-unknown.c"),
            ],
            include_dirs=[
                gi_dir,
                tests_dir,
            ],
            depends=list_headers(gi_dir) + list_headers(tests_dir),
            define_macros=[("PY_SSIZE_T_CLEAN", None)],
        )
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
        add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
        add_ext_compiler_flags(ext, compiler)

        dist = Distribution({"ext_modules": [ext]})

        build_cmd = dist.get_command_obj("build")
        build_cmd.build_base = os.path.join(self.build_base, "pygobject_tests")
        build_cmd.ensure_finalized()

        cmd = dist.get_command_obj("build_ext")
        cmd.inplace = True
        cmd.force = self.force
        cmd.ensure_finalized()
        cmd.run()
Ejemplo n.º 46
0
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run"""
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if os.getenv('SKLEARN_NO_OPENMP'):
        # Build explicitly without OpenMP support
        return False

    start_dir = os.path.abspath('.')

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open('test_openmp.c', 'w') as f:
                f.write(CCODE)

            os.mkdir('objects')

            # Compile, test program
            openmp_flags = get_openmp_flag(ccompiler)
            ccompiler.compile(['test_openmp.c'], output_dir='objects',
                              extra_postargs=openmp_flags)

            # Link test program
            extra_preargs = os.getenv('LDFLAGS', None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join('objects', '*' + ccompiler.obj_extension))
            ccompiler.link_executable(objects, 'test_openmp',
                                      extra_preargs=extra_preargs,
                                      extra_postargs=openmp_flags)

            # Run test program
            output = subprocess.check_output('./test_openmp')
            output = output.decode(sys.stdout.encoding or 'utf-8').splitlines()

            # Check test program output
            if 'nthreads=' in output[0]:
                nthreads = int(output[0].strip().split('=')[1])
                openmp_supported = (len(output) == nthreads)
            else:
                openmp_supported = False

        except (CompileError, LinkError, subprocess.CalledProcessError):
            openmp_supported = False

        finally:
            os.chdir(start_dir)

    err_message = textwrap.dedent(
        """
                            ***

        It seems that scikit-learn cannot be built with OpenMP support.

        - Make sure you have followed the installation instructions:

            https://scikit-learn.org/dev/developers/advanced_installation.html

        - If your compiler supports OpenMP but the build still fails, please
          submit a bug report at:

            https://github.com/scikit-learn/scikit-learn/issues

        - If you want to build scikit-learn without OpenMP support, you can set
          the environment variable SKLEARN_NO_OPENMP and rerun the build
          command. Note however that some estimators will run in sequential
          mode and their `n_jobs` parameter will have no effect anymore.

                            ***
        """)

    if not openmp_supported:
        raise CompileError(err_message)

    return True
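
Because the helper raises CompileError rather than returning False when OpenMP is unusable, the error message above documents an escape hatch. A sketch of opting out before the build runs:

import os

os.environ['SKLEARN_NO_OPENMP'] = '1'   # skip the OpenMP probe entirely
with_openmp = check_openmp_support()    # now returns False instead of raising CompileError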
Ejemplo n.º 47
0
def check_for_openmp():
    """Returns OpenMP compiler and linker flags if local setup supports
    OpenMP or [], [] otherwise

    Code adapted from astropy_helpers, originally written by Tom
    Robitaille and Curtis McCully.
    """

    # Create a temporary directory
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    tmp_dir = tempfile.mkdtemp()
    start_dir = os.path.abspath(".")

    CCODE = dedent("""\
        #include <omp.h>
        #include <stdio.h>
        int main() {
            omp_set_num_threads(2);
            #pragma omp parallel
            printf("nthreads=%d\\n", omp_get_num_threads());
            return 0;
        }""")

    # TODO: test more known compilers:
    # MinGW, AppleClang with libomp, MSVC, ICC, XL, PGI, ...
    if os.name == "nt":
        # TODO: make this work with mingw
        # AFAICS there's no easy way to get the compiler distutils
        # will be using until compilation actually happens
        compile_flags = ["-openmp"]
        link_flags = [""]
    else:
        compile_flags = ["-fopenmp"]
        link_flags = ["-fopenmp"]

    try:
        os.chdir(tmp_dir)

        with open("test_openmp.c", "w") as f:
            f.write(CCODE)

        os.mkdir("objects")

        # Compile, link, and run test program
        with stdchannel_redirected(sys.stderr, os.devnull):
            ccompiler.compile(["test_openmp.c"],
                              output_dir="objects",
                              extra_postargs=compile_flags)
            ccompiler.link_executable(
                glob.glob(os.path.join("objects", "*")),
                "test_openmp",
                extra_postargs=link_flags,
            )
            output = (subprocess.check_output("./test_openmp").decode(
                sys.stdout.encoding or "utf-8").splitlines())

        if "nthreads=" in output[0]:
            nthreads = int(output[0].strip().split("=")[1])
            if len(output) == nthreads:
                using_openmp = True
            else:
                log.warn(
                    "Unexpected number of lines from output of test "
                    "OpenMP program (output was %s)",
                    output,
                )
                using_openmp = False
        else:
            log.warn(
                "Unexpected output from test OpenMP program (output was %s)",
                output)
            using_openmp = False

    except (CompileError, LinkError):
        using_openmp = False
    finally:
        os.chdir(start_dir)

    if using_openmp:
        log.warn("Using OpenMP to compile parallel extensions")
    else:
        log.warn("Unable to compile OpenMP test program so Cython\n"
                 "extensions will be compiled without parallel support")

    if using_openmp:
        return compile_flags, link_flags
    else:
        return [], []
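
Because this variant returns the flag lists directly, a setup script can fold them into its extensions in one pass; `ext_modules` below is assumed to be the script's existing list of Extension objects.

omp_compile_args, omp_link_args = check_for_openmp()
for ext in ext_modules:
    ext.extra_compile_args += omp_compile_args
    ext.extra_link_args += omp_link_args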
Ejemplo n.º 48
0
    def build_extensions(self):
        customize_compiler(self.compiler)
        self._saved_compiler_so = self.compiler.compiler_so

        super(build_ext_custom, self).build_extensions()
Ejemplo n.º 49
0
"""distutils.command.build_clib
Ejemplo n.º 50
0
            print(A())
        except Exception as e:
            print("Error:", e)
    else:
        print(A)

print("sysconfig variables")
pprint(get_config_vars())

print("inspect distutils")

from distutils.sysconfig import customize_compiler
from distutils.ccompiler import new_compiler

compiler = new_compiler(verbose=1, dry_run=1, force=1)
customize_compiler(compiler)

def snoop(obj, attr):
    orig = getattr(obj, attr)
    @wraps(orig)
    def wrapper(*args, **kws):
        print("SNOOP", attr, args, kws)
        return orig(*args, **kws)
    setattr(obj, attr, wrapper)

snoop(compiler, "spawn")

# dummy values for all options so we can see how they are translated
# into compiler arguments
# -I
compiler.set_include_dirs(['idirA','idirB'])
Ejemplo n.º 51
0
def using_clang():
    """Will we be using a clang compiler?"""
    compiler = new_compiler()
    customize_compiler(compiler)
    compiler_ver = getoutput("{0} -v".format(compiler.compiler[0]))
    return 'clang' in compiler_ver
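
A typical reason for asking is to toggle flags that only one compiler family understands; the specific flag below is an illustrative assumption, not taken from the project.

extra_compile_args = ['-O3']
if using_clang():
    extra_compile_args.append('-Qunused-arguments')   # clang-only driver flag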
Ejemplo n.º 52
0
def pre_build_check():
    """
    Try to verify build tools
    """
    if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
        return True

    try:
        from distutils.ccompiler import new_compiler
        from distutils.sysconfig import customize_compiler
        from distutils.dist import Distribution

        # base build_ext just to emulate compiler option setup
        be = build_ext(Distribution())
        be.initialize_options()
        be.finalize_options()

        # First, make sure we have a Python include directory
        have_python_include = any(
            os.path.isfile(os.path.join(p, 'Python.h'))
            for p in be.include_dirs)
        if not have_python_include:
            sys.stderr.write("Did not find 'Python.h' in %s.\n" %
                             (be.include_dirs, ))
            return False

        compiler = new_compiler(compiler=be.compiler)
        customize_compiler(compiler)

        try:
            # We must be able to initialize the compiler if it has that method
            if hasattr(compiler, "initialize"):
                compiler.initialize()
        except:
            return False

        executables = []
        if compiler.compiler_type in ('unix', 'cygwin'):
            executables = [
                compiler.executables[exe][0]
                for exe in ('compiler_so', 'linker_so')
            ]
        elif compiler.compiler_type == 'nt':
            executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]

        if executables:
            from distutils.spawn import find_executable
            for exe in executables:
                if not find_executable(exe):
                    sys.stderr.write(
                        "Failed to find %s for compiler type %s.\n" %
                        (exe, compiler.compiler_type))
                    return False

    except Exception as exc:
        sys.stderr.write('%s\n' % str(exc))
        sys.stderr.write("Failed pre-build check. Attempting anyway.\n")

    # if we are unable to positively id the compiler type, or one of these assumptions fails,
    # just proceed as we would have without the check
    return True
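
A hedged sketch of how a setup script might consume the result, only attempting the optional C extensions when the toolchain looks usable; the extension definition is a placeholder.

from setuptools import Extension

native_ext = Extension('mypkg._native', sources=['mypkg/_native.c'])   # placeholder extension
ext_modules = [native_ext] if pre_build_check() else []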
Ejemplo n.º 53
0
def get_openmp():
    """Try to compile/link an example program to check for OpenMP support.
    
    Based on:
    1) http://stackoverflow.com/questions/16549893/programatically-testing-for-openmp-support-from-a-python-setup-script
    2) https://github.com/lpsinger/healpy/blob/6c3aae58b5f3281e260ef7adce17b1ffc68016f0/setup.py
    """

    import shutil
    from distutils import sysconfig
    from distutils import ccompiler
    compiler = ccompiler.new_compiler()
    sysconfig.get_config_vars()
    sysconfig.customize_compiler(compiler)
    cc = compiler.compiler

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    fh = open('test.c', 'w')
    fh.write(r"""#include <omp.h>
#include <stdio.h>
int main(void) {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
return 0;
}
""")
    fh.close()

    ccmd = []
    ccmd.extend(cc)
    ccmd.extend(['-fopenmp', 'test.c', '-o', 'test'])
    if os.path.basename(cc[0]).find('gcc') != -1:
        ccmd.append('-lgomp')
    elif os.path.basename(cc[0]).find('clang') != -1:
        ccmd.extend(['-L/opt/local/lib/libomp', '-lomp'])
    try:
        subprocess.check_call(ccmd)
        outCFLAGS = [
            '-fopenmp',
        ]
        outLIBS = []
        if os.path.basename(cc[0]).find('gcc') != -1:
            outLIBS.append('-lgomp')
        elif os.path.basename(cc[0]).find('clang') != -1:
            outLIBS.extend(['-L/opt/local/lib/libomp', '-lomp'])

    except subprocess.CalledProcessError:
        print(
            "WARNING:  OpenMP does not appear to be supported by %s, disabling"
            % cc[0])
        outCFLAGS = []
        outLIBS = []

    finally:
        os.chdir(curdir)
        shutil.rmtree(tmpdir)

    return outCFLAGS, outLIBS
Ejemplo n.º 54
0
    def __init__(self,
                 environ=os.environ,
                 osname=os.name,
                 compiler_name=None):

        if osname == 'nt':
            # The compiler used here on Windows may well not be
            # the same compiler that was used to build Python,
            # as the official Python binaries are built with
            # Visual Studio
            if compiler_name is None:
                if environ.get('MSYSTEM') == 'MINGW32' or environ.get('MSYSTEM') == 'MINGW64':
                    compiler_name = 'mingw32'
                else:
                    compiler_name = distutils.ccompiler.get_default_compiler()
            if compiler_name != 'msvc' and \
               compiler_name != 'mingw32':
                raise SystemExit('Specified Compiler \'%s\' is unsupported.' % compiler_name)
        else:
            # XXX: Is it common practice to use a non-Unix compiler
            #      class instance on non-Windows on platforms g-i supports?
            compiler_name = distutils.ccompiler.get_default_compiler()

        # Now, create the distutils ccompiler instance based on the info we have.
        if compiler_name == 'msvc':
            # For MSVC, we need to create an instance of a subclass of distutils'
            # MSVC9Compiler class, as it does not provide a preprocess()
            # implementation
            from . import msvccompiler
            self.compiler = msvccompiler.get_msvc_compiler()

        else:
            self.compiler = distutils.ccompiler.new_compiler(compiler=compiler_name)
        customize_compiler(self.compiler)

        # customize_compiler() from distutils only does customization
        # for 'unix' compiler type.  Also, avoid linking to msvcrxx.dll
        # for MinGW builds as the dumper binary does not link to the
        # Python DLL, but link to msvcrt.dll if necessary.
        if isinstance(self.compiler, Mingw32CCompiler):
            if self.compiler.dll_libraries != ['msvcrt']:
                self.compiler.dll_libraries = []
            if self.compiler.preprocessor is None:
                self.compiler.preprocessor = self.compiler.compiler + ['-E']

        if self.check_is_msvc():
            # We trick distutils into believing that we are (always) using a
            # compiler supplied by a Windows SDK. This avoids launching a new
            # build environment to detect the compiler used to build Python
            # itself (which is not desirable) and lets us use the compiler
            # commands (and env) as-is.
            os.environ['DISTUTILS_USE_SDK'] = '1'
            if 'MSSdk' not in os.environ:
                if 'WindowsSDKDir' in os.environ:
                    os.environ['MSSdk'] = os.environ.get('WindowsSDKDir')
                elif os.environ.get('VCInstallDir'):
                    os.environ['MSSdk'] = os.environ.get('VCInstallDir')

            self.compiler_cmd = 'cl.exe'

            self._cflags_no_deprecation_warnings = "-wd4996"
        else:
            if (isinstance(self.compiler, Mingw32CCompiler)):
                self.compiler_cmd = self.compiler.compiler[0]
            else:
                self.compiler_cmd = ' '.join(self.compiler.compiler)

            self._cflags_no_deprecation_warnings = "-Wno-deprecated-declarations"