Example #1
    def compile_source(self, directory, compiler, debug, clean, native):
        with in_directory(directory):
            if compiler == 'msvc':
                from distutils import msvc9compiler
                # TODO: handle debug
                if debug:
                    logger.warn('Debug flag currently ignored for MSVC')
                vcvars_loc = prefs['codegen.cpp.msvc_vars_location']
                if vcvars_loc == '':
                    for version in xrange(16, 8, -1):
                        fname = msvc9compiler.find_vcvarsall(version)
                        if fname:
                            vcvars_loc = fname
                            break
                if vcvars_loc == '':
                    raise IOError("Cannot find vcvarsall.bat on standard "
                                  "search path. Set the "
                                  "codegen.cpp.msvc_vars_location preference "
                                  "explicitly.")
                # TODO: copy vcvars and make replacements for 64 bit automatically
                arch_name = prefs['codegen.cpp.msvc_architecture']
                if arch_name == '':
                    mach = platform.machine()
                    if mach == 'AMD64':
                        arch_name = 'x86_amd64'
                    else:
                        arch_name = 'x86'

                vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(
                    vcvars_loc=vcvars_loc, arch_name=arch_name)
                make_cmd = 'nmake /f win_makefile'
                if os.path.exists('winmake.log'):
                    os.remove('winmake.log')
                with std_silent(debug):
                    if clean:
                        os.system(
                            '%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1'
                            % (vcvars_cmd, make_cmd))
                    x = os.system(
                        '%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1' %
                        (vcvars_cmd, make_cmd))
                    if x != 0:
                        raise RuntimeError("Project compilation failed")
            else:
                with std_silent(debug):
                    if clean:
                        os.system('make clean')
                    if debug:
                        x = os.system('make debug')
                    elif native:
                        x = os.system('make native')
                    else:
                        x = os.system('make')
                    if x != 0:
                        raise RuntimeError("Project compilation failed")
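Every example in this collection relies on two small helpers from the Brian2 code base: in_directory, which temporarily changes the working directory, and std_silent, which suppresses stdout/stderr unless a debug-style flag asks for the output to be shown. The sketch below is an illustration only, not the real brian2.utils implementation; in particular, a Python-level redirection like this does not capture output from child processes such as make or nmake, which is why the examples additionally redirect shell output into winmake.log.

    import os
    import sys
    import io
    from contextlib import contextmanager

    # Illustrative stand-ins only; the actual Brian2 helpers differ in detail.

    @contextmanager
    def in_directory(path):
        """Temporarily switch the working directory to ``path``."""
        previous = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(previous)

    @contextmanager
    def std_silent(debug=False):
        """Discard Python-level stdout/stderr unless ``debug`` is set."""
        if debug:
            yield
            return
        old_stdout, old_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
        try:
            yield
        finally:
            sys.stdout, sys.stderr = old_stdout, old_stderr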
Example #2
 def compile_source(self, directory, compiler, debug, clean, native):
     with in_directory(directory):
         if compiler == 'msvc':
             from distutils import msvc9compiler
             # TODO: handle debug
             if debug:
                 logger.warn('Debug flag currently ignored for MSVC')
             vcvars_loc = prefs['codegen.cpp.msvc_vars_location']
             if vcvars_loc == '':
                 for version in xrange(16, 8, -1):
                     fname = msvc9compiler.find_vcvarsall(version)
                     if fname:
                         vcvars_loc = fname
                         break
             if vcvars_loc == '':
                 raise IOError("Cannot find vcvarsall.bat on standard "
                               "search path. Set the "
                               "codegen.cpp.msvc_vars_location preference "
                               "explicitly.")
             # TODO: copy vcvars and make replacements for 64 bit automatically
             arch_name = prefs['codegen.cpp.msvc_architecture']
             if arch_name == '':
                 mach = platform.machine()
                 if mach == 'AMD64':
                     arch_name = 'x86_amd64'
                 else:
                     arch_name = 'x86'
             
             vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(
                     vcvars_loc=vcvars_loc, arch_name=arch_name)
             make_cmd = 'nmake /f win_makefile'
             if os.path.exists('winmake.log'):
                 os.remove('winmake.log')
             with std_silent(debug):
                 if clean:
                     os.system('%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                 x = os.system('%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                 if x!=0:
                     if os.path.exists('winmake.log'):
                         print open('winmake.log', 'r').read()
                     raise RuntimeError("Project compilation failed")
         else:
             with std_silent(debug):
                 if clean:
                     os.system('make clean')
                 if debug:
                     x = os.system('make debug')
                 elif native:
                     x = os.system('make native')
                 else:
                     x = os.system('make')
                 if x!=0:
                     raise RuntimeError("Project compilation failed")
Example #3
    def compile_source(self, directory, compiler, debug, clean):
        with in_directory(directory):
            if compiler == "msvc":
                from distutils import msvc9compiler

                # TODO: handle debug
                if debug:
                    logger.warn("Debug flag currently ignored for MSVC")
                vcvars_loc = prefs["codegen.cpp.msvc_vars_location"]
                if vcvars_loc == "":
                    for version in xrange(16, 8, -1):
                        fname = msvc9compiler.find_vcvarsall(version)
                        if fname:
                            vcvars_loc = fname
                            break
                if vcvars_loc == "":
                    raise IOError(
                        "Cannot find vcvarsall.bat on standard "
                        "search path. Set the "
                        "codegen.cpp.msvc_vars_location preference "
                        "explicitly."
                    )
                # TODO: copy vcvars and make replacements for 64 bit automatically
                arch_name = prefs["codegen.cpp.msvc_architecture"]
                if arch_name == "":
                    mach = platform.machine()
                    if mach == "AMD64":
                        arch_name = "x86_amd64"
                    else:
                        arch_name = "x86"

                vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(vcvars_loc=vcvars_loc, arch_name=arch_name)
                make_cmd = "nmake /f win_makefile"
                if os.path.exists("winmake.log"):
                    os.remove("winmake.log")
                with std_silent(debug):
                    if clean:
                        os.system("%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1" % (vcvars_cmd, make_cmd))
                    x = os.system("%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1" % (vcvars_cmd, make_cmd))
                    if x != 0:
                        if os.path.exists("winmake.log"):
                            print open("winmake.log", "r").read()
                        raise RuntimeError("Project compilation failed")
            else:
                with std_silent(debug):
                    if clean:
                        os.system("make clean")
                    if debug:
                        x = os.system("make debug")
                    else:
                        x = os.system("make")
                    if x != 0:
                        raise RuntimeError("Project compilation failed")
Example #4
 def run(self):
     if self.compiled_python_pre is not None:
         exec self.compiled_python_pre in self.python_code_namespace
     if self._done_first_run:
         ret_val = self._compiled_func(self.namespace, {})
     else:
         self._inline_args = (self.annotated_code, self.namespace.keys())
         self._inline_kwds = dict(
             local_dict=self.namespace,
             support_code=self.code.support_code,
             compiler=self.compiler,
             headers=self.headers,
             define_macros=self.define_macros,
             libraries=self.libraries,
             extra_compile_args=self.extra_compile_args,
             extra_link_args=self.extra_link_args,
             include_dirs=self.include_dirs,
             library_dirs=self.library_dirs,
             verbose=0)
         with std_silent():
             ret_val = weave.inline(*self._inline_args, **self._inline_kwds)
         self._compiled_func = function_cache[self.annotated_code]
         self._done_first_run = True
     if self.compiled_python_post is not None:
         exec self.compiled_python_post in self.python_code_namespace
     return ret_val
Example #5
 def is_available(cls):
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             extra_link_args = prefs['codegen.cpp.extra_link_args']
             library_dirs = prefs['codegen.cpp.library_dirs']
             update_for_cross_compilation(library_dirs,
                                          extra_compile_args,
                                          extra_link_args,
                                          logger=logger)
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          library_dirs=library_dirs,
                          include_dirs=prefs['codegen.cpp.include_dirs'],
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex),
                                           ex.__class__.__name__)) ,
                     'failed_compile_test')
         return False
Example #6
 def is_available():
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             extra_link_args = prefs['codegen.cpp.extra_link_args']
             library_dirs = prefs['codegen.cpp.library_dirs']
             if (platform.system() == 'Linux' and
                         platform.architecture()[0] == '32bit' and
                         platform.machine() == 'x86_64'):
                 # We are cross-compiling to 32bit on a 64bit platform
                 logger.info('Cross-compiling to 32bit on a 64bit platform, a set '
                             'of standard compiler options will be appended for '
                             'this purpose (note that you need to have a 32bit '
                             'version of the standard library for this to work).',
                             '64bit_to_32bit',
                             once=True)
                 library_dirs += ['/lib32', '/usr/lib32']
                 extra_compile_args += ['-m32']
                 extra_link_args += ['-m32']
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          library_dirs=library_dirs,
                          include_dirs=prefs['codegen.cpp.include_dirs'],
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex),
                                           ex.__class__.__name__)) ,
                     'failed_compile_test')
         return False
Example #7
 def is_available():
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             extra_link_args = prefs['codegen.cpp.extra_link_args']
             library_dirs = prefs['codegen.cpp.library_dirs']
             if (platform.system() == 'Linux'
                     and platform.architecture()[0] == '32bit'
                     and platform.machine() == 'x86_64'):
                 # We are cross-compiling to 32bit on a 64bit platform
                 logger.info(
                     'Cross-compiling to 32bit on a 64bit platform, a set '
                     'of standard compiler options will be appended for '
                     'this purpose (note that you need to have a 32bit '
                     'version of the standard library for this to work).',
                     '64bit_to_32bit',
                     once=True)
                 library_dirs += ['/lib32', '/usr/lib32']
                 extra_compile_args += ['-m32']
                 extra_link_args += ['-m32']
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          library_dirs=library_dirs,
                          include_dirs=prefs['codegen.cpp.include_dirs'],
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex), ex.__class__.__name__)),
                     'failed_compile_test')
         return False
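Examples #5, #9 and #18 call update_for_cross_compilation with exactly the lists that Examples #6, #7 and #20 patch inline. A plausible consolidation of that inline logic into a helper with the call signature used above (a sketch only; the real brian2 helper may log and validate differently) is shown here. The lists are modified in place on purpose, since the callers keep using their own references afterwards.

    import platform

    def update_for_cross_compilation(library_dirs, extra_compile_args,
                                     extra_link_args, logger=None):
        """Append 32-bit options when a 32-bit Python runs on 64-bit Linux."""
        if (platform.system() == 'Linux'
                and platform.architecture()[0] == '32bit'
                and platform.machine() == 'x86_64'):
            if logger is not None:
                logger.info('Cross-compiling to 32bit on a 64bit platform, a set '
                            'of standard compiler options will be appended for '
                            'this purpose.', '64bit_to_32bit', once=True)
            # In-place updates so the caller's lists see the change
            library_dirs += ['/lib32', '/usr/lib32']
            extra_compile_args += ['-m32']
            extra_link_args += ['-m32']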
Example #8
 def run(self):
     if self.compiled_python_pre is not None:
         exec self.compiled_python_pre in self.python_code_namespace
     if self._done_first_run:
         ret_val = self._compiled_func(self.namespace, {})
     else:
         self._inline_args = (self.annotated_code, self.namespace.keys())
         self._inline_kwds = dict(
             local_dict=self.namespace,
             support_code=self.code.support_code,
             compiler=self.compiler,
             headers=self.headers,
             define_macros=self.define_macros,
             libraries=self.libraries,
             extra_compile_args=self.extra_compile_args,
             extra_link_args=self.extra_link_args,
             include_dirs=self.include_dirs,
             library_dirs=self.library_dirs,
             verbose=0)
         with std_silent():
             ret_val = weave.inline(*self._inline_args, **self._inline_kwds)
         self._compiled_func = function_cache[self.annotated_code]
         self._done_first_run = True
     if self.compiled_python_post is not None:
         exec self.compiled_python_post in self.python_code_namespace
     return ret_val
Example #9
 def is_available(cls):
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             extra_link_args = prefs['codegen.cpp.extra_link_args']
             library_dirs = prefs['codegen.cpp.library_dirs']
             update_for_cross_compilation(library_dirs,
                                          extra_compile_args,
                                          extra_link_args,
                                          logger=logger)
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          library_dirs=library_dirs,
                          include_dirs=prefs['codegen.cpp.include_dirs'],
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex),
                                           ex.__class__.__name__)) ,
                     'failed_compile_test')
         return False
Example #10
def cpp_evaluator(expr, ns):
    if weave is not None:
        compiler, extra_compile_args = get_compiler_and_args()
        with std_silent():
            return weave.inline('return_val = %s;' % expr, ns.keys(), local_dict=ns,
                                compiler=compiler,
                                extra_compile_args=extra_compile_args,
                                include_dirs=prefs['codegen.cpp.include_dirs']
                                )
    else:
        raise nose.SkipTest('No weave support.')
Example #11
def cpp_evaluator(expr, ns):
    compiler, extra_compile_args = get_compiler_and_args()
    with std_silent():
        return weave.inline('return_val = %s;' % expr, ns.keys(), local_dict=ns,
                            support_code=CPPCodeGenerator.universal_support_code,
                            compiler=compiler,
                            extra_compile_args=extra_compile_args,
                            extra_link_args=prefs['codegen.cpp.extra_link_args'],
                            library_dirs=prefs['codegen.cpp.library_dirs'],
                            include_dirs=prefs['codegen.cpp.include_dirs']
                            )
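For reference, the evaluator above takes an expression string and a namespace of plain numeric values; weave compiles 'return_val = <expr>;' on first use and caches the resulting extension, so repeated calls are cheap. A hypothetical call, assuming weave and a working C++ compiler are available:

    ns = {'x': 1.5, 'y': 2.0}
    result = cpp_evaluator('x + 2*y', ns)  # compiles 'return_val = x + 2*y;' and returns 5.5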
Example #12
def _determine_flag_compatibility(compiler, flagname):
    import tempfile
    from distutils.errors import CompileError
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f, std_silent():
        f.write('int main (int argc, char **argv) { return 0; }')
        f.flush()  # make sure the source is actually on disk before compiling
        try:
            compiler.compile([f.name], extra_postargs=[flagname])
        except CompileError:
            logger.warn(f'Removing unsupported flag \'{flagname}\' from '
                        f'compiler flags.')
            return False
    return True
Example #13
def cpp_evaluator(expr, ns):
    if weave is not None:
        compiler, extra_compile_args = get_compiler_and_args()
        with std_silent():
            return weave.inline('return_val = %s;' % expr,
                                ns.keys(),
                                local_dict=ns,
                                compiler=compiler,
                                extra_compile_args=extra_compile_args,
                                include_dirs=prefs['codegen.cpp.include_dirs'])
    else:
        raise nose.SkipTest('No weave support.')
Example #14
 def is_available():
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex), ex.__class__.__name__)),
                     'failed_compile_test')
         return False
Example #15
 def is_available():
     try:
         with std_silent(False):
             compiler, extra_compile_args = get_compiler_and_args()
             weave.inline('int x=0;', [],
                          compiler=compiler,
                          headers=['<algorithm>', '<limits>'],
                          extra_compile_args=extra_compile_args,
                          verbose=0)
             return True
     except Exception as ex:
         logger.warn(('Cannot use weave, a test compilation '
                      'failed: %s (%s)' % (str(ex),
                                           ex.__class__.__name__)) ,
                     'failed_compile_test')
         return False
Example #16
 def run(self):
     if self.compiled_python_pre is not None:
         exec self.compiled_python_pre in self.python_code_namespace
     with std_silent(self._done_first_run):
         ret_val = weave.inline(self.annotated_code, self.namespace.keys(),
                                local_dict=self.namespace,
                                support_code=self.code.support_code,
                                compiler=self.compiler,
                                headers=['<algorithm>', '<limits>'],
                                extra_compile_args=self.extra_compile_args,
                                include_dirs=self.include_dirs,
                                verbose=0)
     self._done_first_run = True
     if self.compiled_python_post is not None:
         exec self.compiled_python_post in self.python_code_namespace
     return ret_val
Example #17
def _determine_flag_compatibility(compiler, flagname):
    import tempfile
    from distutils.errors import CompileError
    with tempfile.TemporaryDirectory(
            prefix='brian_flag_test_') as temp_dir, std_silent():
        fname = os.path.join(temp_dir, 'flag_test.cpp')
        with open(fname, 'wt') as f:
            f.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([fname],
                             output_dir=temp_dir,
                             extra_postargs=[flagname])
        except CompileError:
            logger.warn(f"Removing unsupported flag '{flagname}' from "
                        f'compiler flags.')
            return False
    return True
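Either variant of _determine_flag_compatibility can be used to filter a list of candidate compiler flags before a build. The snippet below is a hypothetical way to wire it up with a plain distutils compiler object (new_compiler and customize_compiler are standard distutils API; how Brian2 actually obtains its compiler instance is not shown in these examples):

    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler

    cc = new_compiler()
    customize_compiler(cc)  # apply the platform's default compiler settings
    candidate_flags = ['-O3', '-march=native']
    usable_flags = [flag for flag in candidate_flags
                    if _determine_flag_compatibility(cc, flag)]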
Example #18
def cpp_evaluator(expr, ns):
    compiler, extra_compile_args = get_compiler_and_args()
    library_dirs = prefs['codegen.cpp.library_dirs']
    extra_link_args = prefs['codegen.cpp.extra_link_args']
    update_for_cross_compilation(library_dirs, extra_compile_args,
                                 extra_link_args)
    with std_silent():
        return weave.inline(
            'return_val = %s;' % expr,
            list(ns),
            local_dict=ns,
            support_code=CPPCodeGenerator.universal_support_code,
            compiler=compiler,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            library_dirs=library_dirs,
            include_dirs=prefs['codegen.cpp.include_dirs'])
Example #19
 def run(self):
     if self.compiled_python_pre is not None:
         exec self.compiled_python_pre in self.python_code_namespace
     with std_silent(self._done_first_run):
         ret_val = weave.inline(self.annotated_code, self.namespace.keys(),
                                local_dict=self.namespace,
                                support_code=self.code.support_code,
                                compiler=self.compiler,
                                headers=self.headers,
                                define_macros=self.define_macros,
                                libraries=self.libraries,
                                extra_compile_args=self.extra_compile_args,
                                extra_link_args=self.extra_link_args,
                                include_dirs=self.include_dirs,
                                library_dirs=self.library_dirs,
                                verbose=0)
     self._done_first_run = True
     if self.compiled_python_post is not None:
         exec self.compiled_python_post in self.python_code_namespace
     return ret_val
Example #20
def cpp_evaluator(expr, ns):
    compiler, extra_compile_args = get_compiler_and_args()
    library_dirs = prefs['codegen.cpp.library_dirs']
    extra_link_args = prefs['codegen.cpp.extra_link_args']
    if (platform.system() == 'Linux' and
                platform.architecture()[0] == '32bit' and
                platform.machine() == 'x86_64'):
        # TODO: This should be refactored, it is repeated in several places
        library_dirs += ['/lib32', '/usr/lib32']
        extra_compile_args += ['-m32']
        extra_link_args += ['-m32']
    with std_silent():
        return weave.inline('return_val = %s;' % expr, ns.keys(), local_dict=ns,
                            support_code=CPPCodeGenerator.universal_support_code,
                            compiler=compiler,
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args,
                            library_dirs=library_dirs,
                            include_dirs=prefs['codegen.cpp.include_dirs']
                            )
Example #21
def example_run(debug=False, **build_options):
    '''
    Run a simple example simulation that tests whether the Brian2/Brian2GeNN/GeNN
    pipeline is working correctly.

    Parameters
    ----------
    debug : bool
        Whether to display debug information (e.g. compilation output) during
        the run. Defaults to ``False``.
    build_options : dict
        Additional options that will be forwarded to the ``set_device`` call,
        e.g. ``use_GPU=False``.
    '''
    from brian2.devices.device import set_device, reset_device
    from brian2 import ms, NeuronGroup, run
    from brian2.utils.logger import std_silent
    import numpy as np
    from numpy.testing import assert_allclose
    from tempfile import mkdtemp
    import shutil
    with std_silent(debug):
        test_dir = mkdtemp(prefix='brian2genn_test')
        set_device('genn', directory=test_dir, debug=debug, **build_options)
        N = 100
        tau = 10 * ms
        eqs = '''
        dV/dt = -V/tau: 1
        '''
        G = NeuronGroup(N,
                        eqs,
                        threshold='V>1',
                        reset='V=0',
                        refractory=5 * ms,
                        method='linear')
        G.V = 'i/100.'
        run(1 * ms)
        assert_allclose(G.V, np.arange(100) / 100. * np.exp(-1 * ms / tau))
        shutil.rmtree(test_dir, ignore_errors=True)
        reset_device()
    print('Example run was successful.')
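A hypothetical invocation of example_run; whether a given build option such as use_GPU is accepted depends on the installed Brian2GeNN version, as the docstring above indicates:

    # Show compilation output and force the CPU backend:
    example_run(debug=True, use_GPU=False)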
Example #22
def example_run(debug=False, **build_options):
    '''
    Run a simple example simulation that tests whether the Brian2/Brian2GeNN/GeNN
    pipeline is working correctly.

    Parameters
    ----------
    debug : bool
        Whether to display debug information (e.g. compilation output) during
        the run. Defaults to ``False``.
    build_options : dict
        Additional options that will be forwarded to the ``set_device`` call,
        e.g. ``use_GPU=False``.
    '''
    from brian2.devices.device import set_device, reset_device
    from brian2 import ms, NeuronGroup, run
    from brian2.utils.logger import std_silent
    import numpy as np
    from numpy.testing import assert_allclose
    from tempfile import mkdtemp
    import shutil
    with std_silent(debug):
        test_dir = mkdtemp(prefix='brian2genn_test')
        set_device('genn', directory=test_dir, debug=debug, **build_options)
        N = 100
        tau = 10*ms
        eqs = '''
        dV/dt = -V/tau: 1
        '''
        G = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', refractory=5 * ms,
                        method='linear')
        G.V = 'i/100.'
        run(1*ms)
        assert_allclose(G.V, np.arange(100)/100.*np.exp(-1*ms/tau))
        shutil.rmtree(test_dir, ignore_errors=True)
        reset_device()
    print('Example run was successful.')
Example #23
    def _load_module(self, module_path, define_macros, include_dirs,
                     library_dirs, extra_compile_args, extra_link_args,
                     libraries, code, lib_dir, module_name,
                     runtime_library_dirs, compiler, key, sources):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if define_macros is None:
                define_macros = []
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w', encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            update_for_cross_compilation(library_dirs,
                                         extra_compile_args,
                                         extra_link_args,
                                         logger=logger)
            for source in sources:
                if not source.lower().endswith('.pyx'):
                    raise ValueError('Additional Cython source files need to '
                                     'have a .pyx ending')
                # Copy source and header file (if present) to library directory
                shutil.copyfile(
                    source, os.path.join(lib_dir, os.path.basename(source)))
                name_without_ext = os.path.splitext(
                    os.path.basename(source))[0]
                header_name = name_without_ext + '.pxd'
                if os.path.exists(
                        os.path.join(os.path.dirname(source), header_name)):
                    shutil.copyfile(
                        os.path.join(os.path.dirname(source), header_name),
                        os.path.join(lib_dir, header_name))
            final_sources = [
                os.path.join(lib_dir, os.path.basename(source))
                for source in sources
            ]
            extension = Extension(name=module_name,
                                  sources=[pyx_file],
                                  define_macros=define_macros,
                                  include_dirs=c_include_dirs,
                                  library_dirs=library_dirs,
                                  runtime_library_dirs=runtime_library_dirs,
                                  extra_compile_args=extra_compile_args,
                                  extra_link_args=extra_link_args,
                                  libraries=libraries,
                                  language='c++')
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize(
                        [extension] + final_sources, **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
                    if prefs['codegen.runtime.cython.delete_source_files']:
                        # we can delete the source files to save disk space
                        cpp_file = os.path.join(lib_dir, module_name + '.cpp')
                        try:
                            os.remove(pyx_file)
                            os.remove(cpp_file)
                            temp_dir = os.path.join(
                                lib_dir,
                                os.path.dirname(pyx_file)[1:],
                                module_name + '.*')
                            for fname in glob.glob(temp_dir):
                                os.remove(fname)
                        except (OSError, IOError) as ex:
                            logger.debug(
                                'Deleting Cython source files failed with error: %s'
                                % str(ex))

            except Cython_Compiler.Errors.CompileError:
                return
        # Temporarily insert the Cython directory to the Python path so that
        # code importing from an external module that was declared via
        # sources works
        sys.path.insert(0, lib_dir)
        spec = importlib.util.spec_from_file_location(module_name, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        sys.path.pop(0)
        self._code_cache[key] = module
        return module
Example #24
    def build(self, directory='output',
              compile=True, run=True, debug=False, clean=True,
              with_output=True, native=True,
              additional_source_files=None, additional_header_files=None,
              main_includes=None, run_includes=None,
              run_args=None, **kwds):
        '''
        Build the project
        
        TODO: more details
        
        Parameters
        ----------
        directory : str
            The output directory to write the project to; any existing files will be overwritten.
        compile : bool
            Whether or not to attempt to compile the project
        run : bool
            Whether or not to attempt to run the built project if it successfully builds.
        debug : bool
            Whether to compile in debug mode.
        with_output : bool
            Whether or not to show the ``stdout`` of the built program when run.
        native : bool
            Whether or not to compile for the current machine's architecture (best for speed, but not portable)
        clean : bool
            Whether or not to clean the project before building
        additional_source_files : list of str
            A list of additional ``.cpp`` files to include in the build.
        additional_header_files : list of str
            A list of additional ``.h`` files to include in the build.
        main_includes : list of str
            A list of additional header files to include in ``main.cpp``.
        run_includes : list of str
            A list of additional header files to include in ``run.cpp``.
        '''
        renames = {'project_dir': 'directory',
                   'compile_project': 'compile',
                   'run_project': 'run'}
        if len(kwds):
            msg = ''
            for kwd in kwds:
                if kwd in renames:
                    msg += ("Keyword argument '%s' has been renamed to "
                            "'%s'. ") % (kwd, renames[kwd])
                else:
                    msg += "Unknown keyword argument '%s'. " % kwd
            raise TypeError(msg)

        if additional_source_files is None:
            additional_source_files = []
        if additional_header_files is None:
            additional_header_files = []
        if main_includes is None:
            main_includes = []
        if run_includes is None:
            run_includes = []
        if run_args is None:
            run_args = []
        self.project_dir = directory
        ensure_directory(directory)

        compiler, extra_compile_args = get_compiler_and_args()
        compiler_flags = ' '.join(extra_compile_args)
        
        for d in ['code_objects', 'results', 'static_arrays']:
            ensure_directory(os.path.join(directory, d))
            
        writer = CPPWriter(directory)
        
        # Get the number of threads if specified in an openmp context
        nb_threads = prefs.devices.cpp_standalone.openmp_threads
        # If the number is negative, we need to throw an error
        if (nb_threads < 0):
            raise ValueError('The number of OpenMP threads cannot be negative!')

        logger.debug("Writing C++ standalone project to directory "+os.path.normpath(directory))
        if nb_threads > 0:
            logger.warn("OpenMP code is not yet well tested, and may be inaccurate.", "openmp", once=True)
            logger.debug("Using OpenMP with %d threads " % nb_threads)
            for codeobj in self.code_objects.itervalues():
                if not 'IS_OPENMP_COMPATIBLE' in codeobj.template_source:
                    raise RuntimeError(("Code object '%s' uses the template %s "
                                        "which is not compatible with "
                                        "OpenMP.") % (codeobj.name,
                                                      codeobj.template_name))
        arange_arrays = sorted([(var, start)
                                for var, start in self.arange_arrays.iteritems()],
                               key=lambda (var, start): var.name)

        # # Find np arrays in the namespaces and convert them into static
        # # arrays. Hopefully they are correctly used in the code: For example,
        # # this works for the namespaces for functions with C++ (e.g. TimedArray
        # # treats it as a C array) but does not work in places that are
        # # implicitly vectorized (state updaters, resets, etc.). But arrays
        # # shouldn't be used there anyway.
        for code_object in self.code_objects.itervalues():
            for name, value in code_object.variables.iteritems():
                if isinstance(value, np.ndarray):
                    self.static_arrays[name] = value

        # write the static arrays
        logger.debug("static arrays: "+str(sorted(self.static_arrays.keys())))
        static_array_specs = []
        for name, arr in sorted(self.static_arrays.items()):
            arr.tofile(os.path.join(directory, 'static_arrays', name))
            static_array_specs.append((name, c_data_type(arr.dtype), arr.size, name))

        # Write the global objects
        networks = [net() for net in Network.__instances__()
                    if net().name != '_fake_network']
        synapses = []
        for net in networks:
            net_synapses = [s for s in net.objects if isinstance(s, Synapses)]
            synapses.extend(net_synapses)
            # We don't currently support pathways with scalar delays
            for synapse_obj in net_synapses:
                for pathway in synapse_obj._pathways:
                    if not isinstance(pathway.variables['delay'],
                                      DynamicArrayVariable):
                        error_msg = ('The "%s" pathway  uses a scalar '
                                     'delay (instead of a delay per synapse). '
                                     'This is not yet supported. Do not '
                                     'specify a delay in the Synapses(...) '
                                     'call but instead set its delay attribute '
                                     'afterwards.') % (pathway.name)
                        raise NotImplementedError(error_msg)

        # Not sure what the best place is to call Network.after_run -- at the
        # moment the only important thing it does is to clear the objects stored
        # in magic_network. If this is not done, this might lead to problems
        # for repeated runs of standalone (e.g. in the test suite).
        for net in networks:
            net.after_run()

        arr_tmp = CPPStandaloneCodeObject.templater.objects(
                        None, None,
                        array_specs=self.arrays,
                        dynamic_array_specs=self.dynamic_arrays,
                        dynamic_array_2d_specs=self.dynamic_arrays_2d,
                        zero_arrays=self.zero_arrays,
                        arange_arrays=arange_arrays,
                        synapses=synapses,
                        clocks=self.clocks,
                        static_array_specs=static_array_specs,
                        networks=networks)
        writer.write('objects.*', arr_tmp)

        main_lines = []
        procedures = [('', main_lines)]
        runfuncs = {}
        for func, args in self.main_queue:
            if func=='run_code_object':
                codeobj, = args
                main_lines.append('_run_%s();' % codeobj.name)
            elif func=='run_network':
                net, netcode = args
                main_lines.extend(netcode)
            elif func=='set_by_array':
                arrayname, staticarrayname = args
                code = '''
                {pragma}
                for(int i=0; i<_num_{staticarrayname}; i++)
                {{
                    {arrayname}[i] = {staticarrayname}[i];
                }}
                '''.format(arrayname=arrayname, staticarrayname=staticarrayname, pragma=openmp_pragma('static'))
                main_lines.extend(code.split('\n'))
            elif func=='set_by_single_value':
                arrayname, item, value = args
                code = '{arrayname}[{item}] = {value};'.format(arrayname=arrayname,
                                                               item=item,
                                                               value=value)
                main_lines.extend([code])
            elif func=='set_array_by_array':
                arrayname, staticarrayname_index, staticarrayname_value = args
                code = '''
                {pragma}
                for(int i=0; i<_num_{staticarrayname_index}; i++)
                {{
                    {arrayname}[{staticarrayname_index}[i]] = {staticarrayname_value}[i];
                }}
                '''.format(arrayname=arrayname, staticarrayname_index=staticarrayname_index,
                           staticarrayname_value=staticarrayname_value, pragma=openmp_pragma('static'))
                main_lines.extend(code.split('\n'))
            elif func=='insert_code':
                main_lines.append(args)
            elif func=='start_run_func':
                name, include_in_parent = args
                if include_in_parent:
                    main_lines.append('%s();' % name)
                main_lines = []
                procedures.append((name, main_lines))
            elif func=='end_run_func':
                name, include_in_parent = args
                name, main_lines = procedures.pop(-1)
                runfuncs[name] = main_lines
                name, main_lines = procedures[-1]
            else:
                raise NotImplementedError("Unknown main queue function type "+func)

        # generate the finalisations
        for codeobj in self.code_objects.itervalues():
            if hasattr(codeobj.code, 'main_finalise'):
                main_lines.append(codeobj.code.main_finalise)

        # Generate data for non-constant values
        code_object_defs = defaultdict(list)
        for codeobj in self.code_objects.itervalues():
            lines = []
            for k, v in codeobj.variables.iteritems():
                if isinstance(v, AttributeVariable):
                    # We assume all attributes are implemented as property-like methods
                    line = 'const {c_type} {varname} = {objname}.{attrname}();'
                    lines.append(line.format(c_type=c_data_type(v.dtype), varname=k, objname=v.obj.name,
                                             attrname=v.attribute))
                elif isinstance(v, ArrayVariable):
                    try:
                        if isinstance(v, DynamicArrayVariable):
                            if v.dimensions == 1:
                                dyn_array_name = self.dynamic_arrays[v]
                                array_name = self.arrays[v]
                                line = '{c_type}* const {array_name} = &{dyn_array_name}[0];'
                                line = line.format(c_type=c_data_type(v.dtype), array_name=array_name,
                                                   dyn_array_name=dyn_array_name)
                                lines.append(line)
                                line = 'const int _num{k} = {dyn_array_name}.size();'
                                line = line.format(k=k, dyn_array_name=dyn_array_name)
                                lines.append(line)
                        else:
                            lines.append('const int _num%s = %s;' % (k, v.size))
                    except TypeError:
                        pass
            for line in lines:
                # Sometimes an array is referred to by two different keys in our
                # dictionary -- make sure to never add a line twice
                if not line in code_object_defs[codeobj.name]:
                    code_object_defs[codeobj.name].append(line)

        # Generate the code objects
        for codeobj in self.code_objects.itervalues():
            ns = codeobj.variables
            # TODO: fix these freeze/CONSTANTS hacks somehow - they work but are not elegant.
            code = freeze(codeobj.code.cpp_file, ns)
            code = code.replace('%CONSTANTS%', '\n'.join(code_object_defs[codeobj.name]))
            code = '#include "objects.h"\n'+code
            
            writer.write('code_objects/'+codeobj.name+'.cpp', code)
            writer.write('code_objects/'+codeobj.name+'.h', codeobj.code.h_file)
                    
        # The code_objects are passed in the right order to run them because they were
        # sorted by the Network object. To support multiple clocks we'll need to be
        # smarter about that.
        main_tmp = CPPStandaloneCodeObject.templater.main(None, None,
                                                          main_lines=main_lines,
                                                          code_objects=self.code_objects.values(),
                                                          report_func=self.report_func,
                                                          dt=float(defaultclock.dt),
                                                          additional_headers=main_includes,
                                                          )
        writer.write('main.cpp', main_tmp)

        if compiler=='msvc':
            std_move = 'std::move'
        else:
            std_move = ''
        network_tmp = CPPStandaloneCodeObject.templater.network(None, None,
                                                             std_move=std_move)
        writer.write('network.*', network_tmp)

        synapses_classes_tmp = CPPStandaloneCodeObject.templater.synapses_classes(None, None)
        writer.write('synapses_classes.*', synapses_classes_tmp)
        
        # Generate the run functions
        run_tmp = CPPStandaloneCodeObject.templater.run(None, None, run_funcs=runfuncs,
                                                        code_objects=self.code_objects.values(),
                                                        additional_headers=run_includes,
                                                        )
        writer.write('run.*', run_tmp)

        # Copy the brianlib directory
        brianlib_dir = os.path.join(os.path.split(inspect.getsourcefile(CPPStandaloneCodeObject))[0],
                                    'brianlib')
        brianlib_files = copy_directory(brianlib_dir, os.path.join(directory, 'brianlib'))
        for file in brianlib_files:
            if file.lower().endswith('.cpp'):
                writer.source_files.append('brianlib/'+file)
            elif file.lower().endswith('.h'):
                writer.header_files.append('brianlib/'+file)

        # Copy the CSpikeQueue implementation
        shutil.copy2(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0], 'cspikequeue.cpp'),
                     os.path.join(directory, 'brianlib', 'spikequeue.h'))
        shutil.copy2(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0], 'stdint_compat.h'),
                     os.path.join(directory, 'brianlib', 'stdint_compat.h'))
        
        writer.source_files.extend(additional_source_files)
        writer.header_files.extend(additional_header_files)

        if compiler=='msvc':
            if native:
                arch_flag = ''
                try:
                    from cpuinfo import cpuinfo
                    res = cpuinfo.get_cpu_info()
                    if 'sse' in res['flags']:
                        arch_flag = '/arch:SSE'
                    if 'sse2' in res['flags']:
                        arch_flag = '/arch:SSE2'
                except ImportError:
                    logger.warn('Native flag for MSVC compiler requires installation of the py-cpuinfo module')
                compiler_flags += ' '+arch_flag
            
            if nb_threads>1:
                openmp_flag = '/openmp'
            else:
                openmp_flag = ''
            # Generate the visual studio makefile
            source_bases = [fname.replace('.cpp', '').replace('/', '\\') for fname in writer.source_files]
            win_makefile_tmp = CPPStandaloneCodeObject.templater.win_makefile(
                None, None,
                source_bases=source_bases,
                compiler_flags=compiler_flags,
                openmp_flag=openmp_flag,
                )
            writer.write('win_makefile', win_makefile_tmp)
        else:
            # Generate the makefile
            if os.name=='nt':
                rm_cmd = 'del *.o /s\n\tdel main.exe $(DEPS)'
            else:
                rm_cmd = 'rm $(OBJS) $(PROGRAM) $(DEPS)'
            makefile_tmp = CPPStandaloneCodeObject.templater.makefile(None, None,
                source_files=' '.join(writer.source_files),
                header_files=' '.join(writer.header_files),
                compiler_flags=compiler_flags,
                rm_cmd=rm_cmd)
            writer.write('makefile', makefile_tmp)
        
        # build the project
        if compile:
            with in_directory(directory):
                if compiler=='msvc':
                    # TODO: handle debug
                    if debug:
                        logger.warn('Debug flag currently ignored for MSVC')
                    vcvars_search_paths = [
                        # futureproofing!
                        r'c:\Program Files\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                        r'c:\Program Files\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                        r'c:\Program Files\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                        r'c:\Program Files\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                        r'c:\Program Files\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                        r'c:\Program Files\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                        r'c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                        ]
                    vcvars_loc = prefs['codegen.cpp.msvc_vars_location']
                    if vcvars_loc=='':
                        for fname in vcvars_search_paths:
                            if os.path.exists(fname):
                                vcvars_loc = fname
                                break
                    if vcvars_loc=='':
                        raise IOError("Cannot find vcvarsall.bat on standard search path.")
                    # TODO: copy vcvars and make replacements for 64 bit automatically
                    arch_name = prefs['codegen.cpp.msvc_architecture']
                    if arch_name=='':
                        mach = platform.machine()
                        if mach=='AMD64':
                            arch_name = 'x86_amd64'
                        else:
                            arch_name = 'x86'
                    
                    vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(
                            vcvars_loc=vcvars_loc, arch_name=arch_name)
                    make_cmd = 'nmake /f win_makefile'
                    if os.path.exists('winmake.log'):
                        os.remove('winmake.log')
                    with std_silent(debug):
                        if clean:
                            os.system('%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                        x = os.system('%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                        if x!=0:
                            raise RuntimeError("Project compilation failed")
                else:
                    with std_silent(debug):
                        if clean:
                            os.system('make clean')
                        if debug:
                            x = os.system('make debug')
                        elif native:
                            x = os.system('make native')
                        else:
                            x = os.system('make')
                        if x!=0:
                            raise RuntimeError("Project compilation failed")
                if run:
                    if not with_output:
                        stdout = open(os.devnull, 'w')
                    else:
                        stdout = None
                    if os.name=='nt':
                        x = subprocess.call(['main'] + run_args, stdout=stdout)
                    else:
                        x = subprocess.call(['./main'] + run_args, stdout=stdout)
                    if x:
                        raise RuntimeError("Project run failed")
                    self.has_been_run = True
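The build method above is normally not called by user code directly; in Brian2's C++ standalone mode it is reached through the device machinery. Below is a minimal sketch of a driver script, assuming a working C++ toolchain; whether the build is triggered automatically at the end of run() or needs the explicit call shown here depends on the Brian2 version, and keyword arguments such as native are version specific.

    from brian2 import NeuronGroup, run, ms
    from brian2.devices.device import set_device, get_device

    set_device('cpp_standalone')  # select the standalone device before defining the model
    tau = 10*ms
    G = NeuronGroup(100, 'dV/dt = -V/tau : 1', threshold='V>1', reset='V=0',
                    method='linear')
    G.V = 'i/100.'
    run(1*ms)
    # Write, compile and run the generated C++ project
    # (recent Brian2 versions do this automatically at the end of run()):
    get_device().build(directory='output', compile=True, run=True, debug=False)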
Example #25
    def _load_module(self, module_path, define_macros, include_dirs,
                     library_dirs, extra_compile_args, extra_link_args,
                     libraries, code, lib_dir, module_name,
                     runtime_library_dirs, compiler, key):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if define_macros is None:
                define_macros = []
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w', encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            update_for_cross_compilation(library_dirs,
                                         extra_compile_args,
                                         extra_link_args,
                                         logger=logger)

            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                define_macros=define_macros,
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                runtime_library_dirs=runtime_library_dirs,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                libraries=libraries,
                language='c++',
            )
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize(
                        [extension], **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
            except Cython_Compiler.Errors.CompileError:
                return

        module = imp.load_dynamic(module_name, module_path)
        self._code_cache[key] = module
        return module
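
update_for_cross_compilation itself is not shown in this example, but a later variant below inlines the equivalent logic; a sketch consistent with that inline code (the logger call is simplified here) might look like:

import platform

def update_for_cross_compilation(library_dirs, extra_compile_args,
                                 extra_link_args, logger=None):
    # Append 32-bit build flags when a 32-bit Python runs on a 64-bit Linux
    # host, mutating the caller's lists in place.
    if (platform.system() == 'Linux' and
            platform.architecture()[0] == '32bit' and
            platform.machine() == 'x86_64'):
        if logger is not None:
            logger.info('Cross-compiling to 32bit on a 64bit platform; '
                        'appending standard compiler options for this purpose.')
        library_dirs += ['/lib32', '/usr/lib32']
        extra_compile_args += ['-m32']
        extra_link_args += ['-m32']
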
Example #26
0
    def create_extension(self, code, force=False, name=None,
                         include=None, library_dirs=None, compile_args=None, link_args=None, lib=None,
                         ):

        if Cython is None:
            raise ImportError('Cython is not available')

        code = deindent(code)

        lib_dir = os.path.expanduser('~/.brian/cython_extensions')
        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)

        key = code, sys.version_info, sys.executable, Cython.__version__
            
        if force:
            # Force a new module name by adding the current time to the
            # key which is hashed to determine the module name.
            key += time.time(),            

        if key in self._code_cache:
            return self._code_cache[key]

        if name is not None:
            module_name = name  # py3compat.unicode_to_str(args.name)
        else:
            module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()



        module_path = os.path.join(lib_dir, module_name + self.so_ext)
        
        have_module = os.path.isfile(module_path)
        
        if not have_module:
            if include is None:
                include = []
            if library_dirs is None:
                library_dirs = []
            if compile_args is None:
                compile_args = []
            if link_args is None:
                link_args = []
            if lib is None:
                lib = []
                
            c_include_dirs = include
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())
            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w') as f:#, encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                extra_compile_args=compile_args,
                extra_link_args=link_args,
                libraries=lib,
                language='c++',
                )
            build_extension = self._get_build_extension()
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                    )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize([extension], **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
            except Cython_Compiler.Errors.CompileError:
                return

        module = imp.load_dynamic(module_name, module_path)
        self._code_cache[key] = module
        return module
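
The caching in create_extension hinges on deriving the module name from a hash of the code plus the build environment; that naming step in isolation (using a throwaway code string, and assuming Cython is installed) is simply:

import hashlib
import sys

import Cython

code = 'def double(double x):\n    return 2 * x\n'
# The cache key ties the compiled module to both the source text and the
# build environment, so a new interpreter or Cython version yields a new name.
key = code, sys.version_info, sys.executable, Cython.__version__
module_name = '_cython_magic_' + hashlib.md5(str(key).encode('utf-8')).hexdigest()
print(module_name)
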
Example #27
0
 def compile_source(self, directory, compiler, debug, clean, native):
     with in_directory(directory):
         if compiler=='msvc':
             # TODO: handle debug
             if debug:
                 logger.warn('Debug flag currently ignored for MSVC')
             vcvars_search_paths = [
                 # futureproofing!
                 r'c:\Program Files\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                 r'c:\Program Files\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                 r'c:\Program Files\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                 r'c:\Program Files\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                 r'c:\Program Files\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                 r'c:\Program Files\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                 r'c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                 ]
             vcvars_loc = prefs['codegen.cpp.msvc_vars_location']
             if vcvars_loc=='':
                 for fname in vcvars_search_paths:
                     if os.path.exists(fname):
                         vcvars_loc = fname
                         break
             if vcvars_loc=='':
                 raise IOError("Cannot find vcvarsall.bat on standard search path.")
             # TODO: copy vcvars and make replacements for 64 bit automatically
             arch_name = prefs['codegen.cpp.msvc_architecture']
             if arch_name=='':
                 mach = platform.machine()
                 if mach=='AMD64':
                     arch_name = 'x86_amd64'
                 else:
                     arch_name = 'x86'
             
             vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(
                     vcvars_loc=vcvars_loc, arch_name=arch_name)
             make_cmd = 'nmake /f win_makefile'
             if os.path.exists('winmake.log'):
                 os.remove('winmake.log')
             with std_silent(debug):
                 if clean:
                     os.system('%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                 x = os.system('%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                 if x!=0:
                     raise RuntimeError("Project compilation failed")
         else:
             with std_silent(debug):
                 if clean:
                     os.system('make clean')
                 if debug:
                     x = os.system('make debug')
                 elif native:
                     x = os.system('make native')
                 else:
                     x = os.system('make')
                 if x!=0:
                     raise RuntimeError("Project compilation failed")
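
The MSVC branch works by chaining vcvarsall.bat and nmake in a single shell command, so the environment variables set by the batch file are still in effect when nmake runs; a sketch of just that step (the path and architecture below are illustrative values, not taken from the original preferences) is:

import os

vcvars_loc = r'c:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat'
arch_name = 'x86_amd64'

vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(vcvars_loc=vcvars_loc,
                                                 arch_name=arch_name)
make_cmd = 'nmake /f win_makefile'
# '&&' keeps both commands inside one cmd.exe invocation, so nmake inherits
# the compiler environment vcvarsall.bat has just configured; all output is
# appended to winmake.log for later inspection.
status = os.system('%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1'
                   % (vcvars_cmd, make_cmd))
if status != 0:
    raise RuntimeError("Project compilation failed")
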
Example #28
0
    def _load_module(self, module_path, include_dirs, library_dirs,
                     extra_compile_args, extra_link_args, libraries, code,
                     lib_dir, module_name, runtime_library_dirs, compiler,
                     key):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w') as f:#, encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                runtime_library_dirs=runtime_library_dirs,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                libraries=libraries,
                language='c++',
                )
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                    )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize([extension], **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
            except Cython_Compiler.Errors.CompileError:
                return

        module = imp.load_dynamic(module_name, module_path)
        self._code_cache[key] = module
        return module
Example #29
0
    def _load_module(self, module_path, include_dirs, library_dirs,
                     extra_compile_args, extra_link_args, libraries, code,
                     lib_dir, module_name, runtime_library_dirs, compiler,
                     key):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w') as f:#, encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            if (platform.system() == 'Linux' and
                        platform.architecture()[0] == '32bit' and
                        platform.machine() == 'x86_64'):
                # We are cross-compiling to 32bit on a 64bit platform
                logger.info('Cross-compiling to 32bit on a 64bit platform, a set '
                            'of standard compiler options will be appended for '
                            'this purpose (note that you need to have a 32bit '
                            'version of the standard library for this to work).',
                            '64bit_to_32bit',
                            once=True)
                library_dirs += ['/lib32', '/usr/lib32']
                extra_compile_args += ['-m32']
                extra_link_args += ['-m32']

            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                runtime_library_dirs=runtime_library_dirs,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                libraries=libraries,
                language='c++',
                )
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                    )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize([extension], **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
            except Cython_Compiler.Errors.CompileError:
                return

        module = imp.load_dynamic(module_name, module_path)
        self._code_cache[key] = module
        return module
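
Stripped of caching, preference handling, and error silencing, the build pipeline shared by these _load_module variants boils down to cythonize() feeding a distutils build_ext command. The following self-contained sketch reconstructs that pipeline under our own assumptions: the module name and scratch directory are illustrative, and constructing build_ext by hand stands in for the unshown self._get_build_extension():

import os

from distutils.command.build_ext import build_ext
from distutils.core import Distribution, Extension

from Cython.Build import cythonize

lib_dir = os.path.abspath('cython_build_demo')   # illustrative scratch directory
if not os.path.exists(lib_dir):
    os.makedirs(lib_dir)

pyx_file = os.path.join(lib_dir, 'demo.pyx')
with open(pyx_file, 'w') as f:
    f.write('def double(double x):\n    return 2 * x\n')

extension = Extension(name='demo', sources=[pyx_file], language='c++')

# cythonize() translates the .pyx into C++; build_ext compiles and links it.
build_extension = build_ext(Distribution())
build_extension.finalize_options()
build_extension.extensions = cythonize([extension], quiet=True, force=True)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()   # leaves demo.<so|pyd> next to the .pyx file
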
Example #30
0
    def _load_module(self, module_path, define_macros, include_dirs, library_dirs,
                     extra_compile_args, extra_link_args, libraries, code,
                     lib_dir, module_name, runtime_library_dirs, compiler,
                     key, sources):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if define_macros is None:
                define_macros = []
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            #pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            #with io.open(pyx_file, 'w', encoding='utf-8') as f:
            #    f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            update_for_cross_compilation(library_dirs,
                                         extra_compile_args,
                                         extra_link_args, logger=logger)
            for source in sources:
                if not source.lower().endswith('.pyx'):
                    raise ValueError('Additional Cython source files need to '
                                     'have a .pyx ending')
                # Copy source and header file (if present) to library directory
                shutil.copyfile(source, os.path.join(lib_dir,
                                                     os.path.basename(source)))
                name_without_ext = os.path.splitext(os.path.basename(source))[0]
                header_name = name_without_ext + '.pxd'
                if os.path.exists(os.path.join(os.path.dirname(source), header_name)):
                    shutil.copyfile(os.path.join(os.path.dirname(source), header_name),
                                    os.path.join(lib_dir, header_name))
            final_sources = [os.path.join(lib_dir, os.path.basename(source))
                             for source in sources]
            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                define_macros=define_macros,
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                runtime_library_dirs=runtime_library_dirs,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                libraries=libraries,
                language='c++')
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                    )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize([extension] + final_sources, **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
                    build_extension.run()
                    if prefs['codegen.runtime.cython.delete_source_files']:
                        # we can delete the source files to save disk space
                        cpp_file = os.path.join(lib_dir, module_name + '.cpp')
                        try:
                            os.remove(pyx_file)
                            os.remove(cpp_file)
                            temp_dir = os.path.join(lib_dir, os.path.dirname(pyx_file)[1:], module_name + '.*')
                            for fname in glob.glob(temp_dir):
                                os.remove(fname)
                        except (OSError, IOError) as ex:
                            logger.debug('Deleting Cython source files failed with error: %s' % str(ex))

            except Cython_Compiler.Errors.CompileError:
                return
        # Temporarily insert the Cython directory to the Python path so that
        # code importing from an external module that was declared via
        # sources works
        sys.path.insert(0, lib_dir)
        module = imp.load_dynamic(module_name, module_path)
        sys.path.pop(0)
        self._code_cache[key] = module
        return module
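
The temporary sys.path manipulation around imp.load_dynamic at the end can also be expressed as a small context manager; prepend_to_sys_path below is our own name for that pattern and is not part of the original code:

import sys
from contextlib import contextmanager

@contextmanager
def prepend_to_sys_path(directory):
    # Make `directory` importable only for the duration of the block, so that
    # extra .pyx/.pxd sources copied into lib_dir can be resolved at load time.
    sys.path.insert(0, directory)
    try:
        yield
    finally:
        sys.path.pop(0)

# Usage mirroring the tail of _load_module above (imp and the surrounding
# variables are assumed to be in scope):
# with prepend_to_sys_path(lib_dir):
#     module = imp.load_dynamic(module_name, module_path)
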