def initialize_asp_mod(self):
    """Build and configure the backend-specific ASP module for the GMM specializer.

    Populates ``GMM.asp_mod`` with generated source for every enabled backend
    (CUDA, Cilk+, TBB), tunes each backend's toolchain, and returns the
    configured module.
    """
    GMM.asp_mod = asp_module.ASPModule(use_cuda=GMM.use_cuda,
                                       use_cilk=GMM.use_cilk,
                                       use_tbb=GMM.use_tbb)

    if GMM.use_cuda:
        self.insert_base_code_into_listed_modules(['c++'])
        self.insert_non_rendered_code_into_cuda_module()
        self.insert_rendered_code_into_module('cuda')
        cuda_flags = ["-Xcompiler", "-fPIC",
                      "-arch=sm_%s%s" % GMM.platform_info['cuda']['capability']]
        GMM.asp_mod.backends['cuda'].toolchain.cflags.extend(cuda_flags)
        # TODO: For now, must force ONLY cuda backend to compile
        GMM.asp_mod.backends['c++'].compilable = False

    if GMM.use_cilk:
        self.insert_base_code_into_listed_modules(['cilk'])
        self.insert_non_rendered_code_into_cilk_module()
        self.insert_rendered_code_into_module('cilk')
        cilk_toolchain = GMM.asp_mod.backends['cilk'].toolchain
        cilk_toolchain.cc = 'icc'
        cilk_toolchain.cflags = ['-O2', '-gcc', '-ip', '-fPIC']

    if GMM.use_tbb:
        self.insert_base_code_into_listed_modules(['tbb'])
        self.insert_non_rendered_code_into_tbb_module()
        self.insert_rendered_code_into_module('tbb')

    # Attach numpy/boost (and CUDA where relevant) to every backend toolchain.
    from codepy.libraries import add_numpy, add_boost_python, add_cuda
    for backend_name, backend in GMM.asp_mod.backends.iteritems():
        add_numpy(backend.toolchain)
        add_boost_python(backend.toolchain)
        if backend_name == 'cuda':
            add_cuda(backend.toolchain)
    return GMM.asp_mod
def initialize_asp_mod(self):
    """Build and configure the backend-specific ASP module for the GMM specializer.

    Populates ``GMM.asp_mod`` with generated source for each enabled backend
    (CUDA, Cilk+), tunes each backend's toolchain, and returns the configured
    module.
    """
    GMM.asp_mod = asp_module.ASPModule(use_cuda=GMM.use_cuda,
                                       use_cilk=GMM.use_cilk)

    if GMM.use_cuda:
        print("GMM SPECIALIZER: USING CUDA")
        self.insert_base_code_into_listed_modules(['c++'])
        self.insert_non_rendered_code_into_cuda_module()
        self.insert_rendered_code_into_module('cuda')
        cuda_flags = ["-Xcompiler", "-fPIC",
                      "-arch=sm_%s%s" % GMM.platform_info['cuda']['capability']]
        GMM.asp_mod.backends['cuda'].toolchain.cflags.extend(cuda_flags)
        # TODO: For now, must force ONLY cuda backend to compile
        GMM.asp_mod.backends['c++'].compilable = False
        #print GMM.asp_mod.generate()

    if GMM.use_cilk:
        print("GMM SPECIALIZER: USING CILK+")
        self.insert_base_code_into_listed_modules(['cilk'])
        self.insert_non_rendered_code_into_cilk_module()
        self.insert_rendered_code_into_module('cilk')
        cilk_toolchain = GMM.asp_mod.backends['cilk'].toolchain
        cilk_toolchain.cc = 'icc'
        cilk_toolchain.cflags = ['-O2', '-gcc', '-ip', '-fPIC']

    # Attach numpy/boost (and CUDA where relevant) to every backend toolchain.
    from codepy.libraries import add_numpy, add_boost_python, add_cuda
    for backend_name, backend in GMM.asp_mod.backends.iteritems():
        add_numpy(backend.toolchain)
        add_boost_python(backend.toolchain)
        if backend_name == 'cuda':
            add_cuda(backend.toolchain)
    return GMM.asp_mod
def compile(self, host_toolchain, nvcc_toolchain, host_kwargs=None, nvcc_kwargs=None, **kwargs):
    """Return the extension module generated from the code described by *self*.

    If necessary, build the code using *toolchain* with
    :func:`codepy.jit.extension_from_string`. Any keyword arguments
    accept by that latter function may be passed in *kwargs*.

    Host and device code are compiled to plain objects with their own
    toolchains/kwargs, then linked together (or a previously linked
    shared library is re-loaded if nothing was recompiled).
    """
    # Mutable default arguments ({}) are shared across calls; use None
    # sentinels instead (matches the sibling checksum-aware compile()).
    if host_kwargs is None:
        host_kwargs = {}
    if nvcc_kwargs is None:
        nvcc_kwargs = {}

    from codepy.libraries import add_boost_python, add_cuda
    host_toolchain = host_toolchain.copy()
    add_boost_python(host_toolchain)
    add_cuda(host_toolchain)
    nvcc_toolchain = nvcc_toolchain.copy()
    add_cuda(nvcc_toolchain)

    host_code = str(self.boost_module.generate()) + "\n"
    device_code = str(self.generate()) + "\n"

    # extension_from_string was imported but never used; dropped.
    from codepy.jit import compile_from_string
    from codepy.jit import link_extension

    local_host_kwargs = kwargs.copy()
    local_host_kwargs.update(host_kwargs)
    local_nvcc_kwargs = kwargs.copy()
    local_nvcc_kwargs.update(nvcc_kwargs)

    # Don't compile shared objects, just normal objects
    # (on some platforms, they're different)
    host_checksum, host_mod_name, host_object, host_compiled = compile_from_string(
        host_toolchain, self.boost_module.name, host_code,
        object=True, **local_host_kwargs)
    device_checksum, device_mod_name, device_object, device_compiled = compile_from_string(
        nvcc_toolchain, 'gpu', device_code, 'gpu.cu',
        object=True, **local_nvcc_kwargs)

    # The name of the shared lib depends on the hex checksums of both
    # host and device code to prevent accidentally returning a cached
    # module with wrong linkage
    mod_name = "codepy.temp.%s.%s.module" % (host_checksum, device_checksum)

    if host_compiled or device_compiled:
        return link_extension(host_toolchain,
                              [host_object, device_object],
                              mod_name, **kwargs)
    else:
        import os.path
        destination_base, first_object = os.path.split(host_object)
        module_path = os.path.join(destination_base,
                                   mod_name + host_toolchain.so_ext)
        try:
            from imp import load_dynamic
            return load_dynamic(mod_name, module_path)
        except Exception:
            # Cached shared lib missing/unloadable -- fall back to relinking.
            # (Was a bare except:, which also swallowed KeyboardInterrupt.)
            return link_extension(host_toolchain,
                                  [host_object, device_object],
                                  mod_name, **kwargs)
def compile(self, toolchain, **kwargs):
    """Build (if needed) and return the extension module for this code.

    Works on a copy of *toolchain* augmented with Boost.Python settings;
    any extra keyword arguments are forwarded to
    :func:`codepy.jit.extension_from_string`.
    """
    from codepy.jit import extension_from_string
    from codepy.libraries import add_boost_python

    build_toolchain = toolchain.copy()
    add_boost_python(build_toolchain)

    source = str(self.generate()) + "\n"
    return extension_from_string(build_toolchain, self.name, source, **kwargs)
def compile(self, toolchain, **kwargs):
    """Build (if needed) and return the extension module for this code.

    Works on a copy of *toolchain* augmented with Boost.Python settings;
    any extra keyword arguments are forwarded to
    :func:`codepy.jit.extension_from_string`.
    """
    from codepy.jit import extension_from_string
    from codepy.libraries import add_boost_python

    build_toolchain = toolchain.copy()
    add_boost_python(build_toolchain)

    source = "{}\n".format(self.generate())
    return extension_from_string(build_toolchain, self.name, source, **kwargs)
def initialize_asp_mod(self):
    """Build and configure the ASP module for the SVM specializer.

    Populates ``SVM.asp_mod`` with generated source for the CUDA backend,
    tunes the backend toolchains, and returns the configured module.
    """
    # Create ASP module
    SVM.asp_mod = asp_module.ASPModule(use_cuda=SVM.use_cuda)
    if SVM.use_cuda:
        self.insert_cache_controller_code_into_listed_modules(['c++', 'cuda'])
        self.insert_base_code_into_listed_modules(['c++'])
        self.insert_non_rendered_code_into_module()
        self.insert_rendered_code_into_cuda_module()
        SVM.asp_mod.backends['cuda'].toolchain.cflags.extend(
            ["-Xcompiler", "-fPIC",
             "-arch=sm_%s%s" % SVM.platform_info['cuda']['capability']])
        SVM.asp_mod.backends['c++'].toolchain.cflags.extend(["-lcublas"])
        # TODO: For now, must force ONLY cuda backend to compile
        SVM.asp_mod.backends['c++'].compilable = False
        #print SVM.asp_mod.generate()

    # Setup toolchain. Imported locally (consistent with the GMM
    # specializer's initialize_asp_mod) instead of relying on
    # add_numpy/add_boost_python/add_cuda being module-level globals.
    from codepy.libraries import add_numpy, add_boost_python, add_cuda
    for name, mod in SVM.asp_mod.backends.iteritems():
        add_numpy(mod.toolchain)
        add_boost_python(mod.toolchain)
        if name in ['cuda']:
            add_cuda(mod.toolchain)
    return SVM.asp_mod
def compile(self, host_toolchain, nvcc_toolchain,
            host_kwargs=None, nvcc_kwargs=None, **kwargs):
    """Return the extension module generated from the code described by *self*.

    If necessary, build the code using *toolchain* with
    :func:`codepy.jit.extension_from_string`. Any keyword arguments
    accept by that latter function may be passed in *kwargs*.
    """
    from codepy.libraries import add_boost_python, add_cuda
    from codepy.jit import compile_from_string
    from codepy.jit import link_extension

    host_toolchain = host_toolchain.copy()
    add_boost_python(host_toolchain)
    add_cuda(host_toolchain)

    nvcc_toolchain = nvcc_toolchain.copy()
    add_cuda(nvcc_toolchain)

    host_code = "{}\n".format(self.boost_module.generate())
    device_code = "{}\n".format(self.generate())

    # Merge the shared kwargs with the per-toolchain overrides.
    merged_host_kwargs = dict(kwargs)
    merged_host_kwargs.update(host_kwargs or {})
    merged_nvcc_kwargs = dict(kwargs)
    merged_nvcc_kwargs.update(nvcc_kwargs or {})

    # Compile to plain objects rather than shared objects
    # (on some platforms, they're different).
    host_checksum, host_mod_name, host_object, host_compiled = \
        compile_from_string(
            host_toolchain, self.boost_module.name, host_code,
            object=True, **merged_host_kwargs)
    device_checksum, device_mod_name, device_object, device_compiled = \
        compile_from_string(
            nvcc_toolchain, "gpu", device_code, "gpu.cu",
            object=True, **merged_nvcc_kwargs)

    # Key the shared-lib name on both checksums so a cached module with
    # stale linkage is never returned by accident.
    mod_name = f"codepy.temp.{host_checksum}.{device_checksum}.module"

    if host_compiled or device_compiled:
        return link_extension(host_toolchain,
                              [host_object, device_object],
                              mod_name, **kwargs)

    # Nothing was recompiled: try to reuse the previously linked module.
    import os.path
    destination_base, first_object = os.path.split(host_object)
    module_path = os.path.join(destination_base,
                               mod_name + host_toolchain.so_ext)
    try:
        from imp import load_dynamic
        return load_dynamic(mod_name, module_path)
    except Exception:
        return link_extension(host_toolchain,
                              [host_object, device_object],
                              mod_name, **kwargs)
# Minimal codepy "hello world": compile an inline Boost.Python extension
# module at runtime and call a function from it. (Python 2 syntax: the
# final `print` is a statement.)
MODULE_CODE = """
#include <boost/python.hpp>

namespace {
char const *greet()
{
  return "hello world";
}
}

BOOST_PYTHON_MODULE(module)
{
  boost::python::def("greet", &greet);
}
"""

from codepy.toolchain import guess_toolchain
# Derive compiler/flags from the running Python's build configuration.
toolchain = guess_toolchain()

from codepy.libraries import add_boost_python
# Add Boost.Python include/link settings to the toolchain.
add_boost_python(toolchain)

from codepy.jit import extension_from_string
# JIT-compile (or fetch from cache) the extension module named "module".
cmod = extension_from_string(toolchain, "module", MODULE_CODE)

print cmod.greet()
# Minimal codepy "hello world": JIT-compile an inline Boost.Python
# extension module and call a function exported by it.
from codepy.jit import extension_from_string
from codepy.libraries import add_boost_python
from codepy.toolchain import guess_toolchain

MODULE_CODE = """
#include <boost/python.hpp>

namespace {
char const *greet()
{
  return "hello world";
}
}

BOOST_PYTHON_MODULE(module)
{
  boost::python::def("greet", &greet);
}
"""

# Derive compiler/flags from the running Python's build configuration,
# then add Boost.Python include/link settings.
toolchain = guess_toolchain()
add_boost_python(toolchain)

# Compile (or fetch from cache) the extension module named "module".
cmod = extension_from_string(toolchain, "module", MODULE_CODE)
print(cmod.greet())
def create_native(self):
    # Generate a Boost.Python extension module ("NativeParameters") whose C
    # struct mirrors config.parameters, compile it with nvcc, and call its
    # exported copy_dict() to copy the Python dict into the native struct.
    # (Python 2 era code: iteritems(), print statements, python2.6 link lib.)
    from cgen import (ArrayOf, POD, Block, For, Statement, Struct)
    from cgen import dtype_to_ctype
    import numpy
    members = []   # cgen struct member declarations
    code = []      # cgen statements that extract each key from the dict
    # One struct member + extraction snippet per parameter; assumes values
    # are int, float, or homogeneous lists thereof -- TODO confirm. Other
    # types are silently skipped.
    for pk, pv in config.parameters.iteritems():
        if isinstance(pv, int):
            # NOTE(review): numpy.int was removed in NumPy >= 1.24; would
            # need numpy.intc/int32 on modern NumPy.
            members.append(POD(numpy.int, pk))
            code.append(
                Statement("params.%s = extract<%s>(cppdict[\"%s\"])" %
                          (pk, dtype_to_ctype(numpy.int), pk)))
        elif isinstance(pv, float):
            members.append(POD(numpy.float64, pk))
            code.append(
                Statement("params.%s = extract<%s>(cppdict[\"%s\"])" %
                          (pk, dtype_to_ctype(numpy.float64), pk)))
        elif isinstance(pv, list):
            # Fixed-size C array sized from the current list; element type
            # inferred from the first element only.
            if isinstance(pv[0], int):
                members.append(ArrayOf(POD(numpy.int, pk), len(pv)))
                code.append(
                    Block([
                        Statement("list v = extract<%s>(cppdict[\"%s\"])" %
                                  (list.__name__, pk)),
                        For(
                            "unsigned int i = 0", "i<len(v)", "++i",
                            Statement("params.%s[i] = extract<%s>(v[i])" %
                                      (pk, dtype_to_ctype(numpy.int)))),
                    ]))
            elif isinstance(pv[0], float):
                members.append(ArrayOf(POD(numpy.float64, pk), len(pv)))
                code.append(
                    Block([
                        Statement("list v = extract<%s>(cppdict[\"%s\"])" %
                                  (list.__name__, pk)),
                        For(
                            "unsigned int i = 0", "i < len(v)", "++i",
                            Block([
                                Statement(
                                    "params.%s[i] = extract<%s>(v[i])" %
                                    (pk, dtype_to_ctype(numpy.float64))),
                                Statement(
                                    "//std::cout << params.%s[i] << std::endl" % (pk))
                            ])),
                    ]))
    mystruct = Struct('Parameters', members)
    mycode = Block(code)
    # print mystruct
    # print mycode
    from jinja2 import Template
    # C++ source skeleton; my_struct/my_extractor are filled from the cgen
    # output above, my_module names the Boost.Python module.
    tpl = Template("""
#include <boost/python.hpp>
#include <boost/python/object.hpp>
#include <boost/python/extract.hpp>
#include <boost/python/list.hpp>
#include <boost/python/dict.hpp>
#include <boost/python/str.hpp>
#include <stdexcept>
#include <iostream>

{{my_struct}}

Parameters params;

void CopyDictionary(boost::python::object pydict)
{
    using namespace boost::python;
    extract< dict > cppdict_ext(pydict);
    if(!cppdict_ext.check()){
        throw std::runtime_error(
            "PassObj::pass_dict: type error: not a python dict.");
    }
    dict cppdict = cppdict_ext();
    list keylist = cppdict.keys();

    {{my_extractor}}
}

BOOST_PYTHON_MODULE({{my_module}})
{
    boost::python::def("copy_dict", &CopyDictionary);
}
""")
    rendered_tpl = tpl.render(my_module="NativeParameters",
                              my_extractor=mycode, my_struct=mystruct)
    # print rendered_tpl
    # Build an nvcc toolchain starting from this Python's build config.
    from codepy.toolchain import NVCCToolchain
    import codepy.toolchain
    kwargs = codepy.toolchain._guess_toolchain_kwargs_from_python_config()
    # print kwargs
    kwargs["cc"] = "nvcc"
    # kwargs["cflags"]=["-m64","-x","cu","-Xcompiler","-fPIC","-ccbin","/opt/local/bin/g++-mp-4.4"]
    kwargs["cflags"] = ["-m64", "-x", "cu", "-Xcompiler", "-fPIC"]
    # NOTE(review): hard-coded CUDA include path and python2.6 link library;
    # machine-specific -- verify against the deployment environment.
    kwargs["include_dirs"].append("/usr/local/cuda/include")
    kwargs["defines"] = []
    kwargs["ldflags"] = ["-shared"]
    # kwargs["libraries"]=["python2.7"]
    kwargs["libraries"] = ["python2.6"]
    print kwargs
    toolchain = NVCCToolchain(**kwargs)
    from codepy.libraries import add_boost_python
    add_boost_python(toolchain)
    from codepy.jit import extension_from_string
    # JIT-compile the rendered source and copy the parameters across.
    mymod = extension_from_string(toolchain, "NativeParameters", rendered_tpl)
    mymod.copy_dict(config.parameters)
def compile(self, host_toolchain, nvcc_toolchain, host_kwargs=None, nvcc_kwargs=None, **kwargs):
    """Return the extension module generated from the code described by *self*.

    If necessary, build the code using *toolchain* with
    :func:`codepy.jit.extension_from_string`. Any keyword arguments
    accept by that latter function may be passed in *kwargs*.

    Host and device code are compiled to plain objects with their own
    toolchains/kwargs, then linked together (or a previously linked
    shared library is re-loaded if nothing was recompiled).
    """
    # Mutable default arguments ({}) are shared across calls; use None
    # sentinels instead.
    if host_kwargs is None:
        host_kwargs = {}
    if nvcc_kwargs is None:
        nvcc_kwargs = {}

    from codepy.libraries import add_boost_python, add_cuda
    host_toolchain = host_toolchain.copy()
    add_boost_python(host_toolchain)
    add_cuda(host_toolchain)
    nvcc_toolchain = nvcc_toolchain.copy()
    add_cuda(nvcc_toolchain)

    host_code = str(self.boost_module.generate()) + "\n"
    device_code = str(self.generate()) + "\n"

    # extension_from_string was imported but never used; dropped.
    from codepy.jit import compile_from_string
    from codepy.jit import link_extension

    local_host_kwargs = kwargs.copy()
    local_host_kwargs.update(host_kwargs)
    local_nvcc_kwargs = kwargs.copy()
    local_nvcc_kwargs.update(nvcc_kwargs)

    # Don't compile shared objects, just normal objects
    # (on some platforms, they're different)
    host_mod_name, host_object, host_compiled = compile_from_string(
        host_toolchain, self.boost_module.name, host_code,
        object=True, **local_host_kwargs)
    device_mod_name, device_object, device_compiled = compile_from_string(
        nvcc_toolchain, 'gpu', device_code, 'gpu.cu',
        object=True, **local_nvcc_kwargs)

    if host_compiled or device_compiled:
        return link_extension(host_toolchain,
                              [host_object, device_object],
                              host_mod_name, **kwargs)
    else:
        import os.path
        destination_base, first_object = os.path.split(host_object)
        module_path = os.path.join(destination_base,
                                   host_mod_name + host_toolchain.so_ext)
        try:
            from imp import load_dynamic
            return load_dynamic(host_mod_name, module_path)
        except Exception:
            # Cached shared lib missing/unloadable -- fall back to relinking.
            # (Was a bare except:, which also swallowed KeyboardInterrupt.)
            return link_extension(host_toolchain,
                                  [host_object, device_object],
                                  host_mod_name, **kwargs)