class AccelerationEvalCythonHelper(object):
    """Helper that generates, compiles and loads the Cython module backing an
    ``AccelerationEval`` instance.
    """

    def __init__(self, acceleration_eval):
        self.object = acceleration_eval
        self.config = get_config()
        self.all_array_names = get_all_array_names(
            self.object.particle_arrays
        )
        self.known_types = get_known_types_for_arrays(
            self.all_array_names
        )
        self._ext_mod = None
        self._module = None

    ##########################################################################
    # Public interface.
    ##########################################################################
    def get_code(self):
        path = join(dirname(__file__), 'acceleration_eval_cython.mako')
        template = Template(filename=path)
        main = template.render(helper=self)
        return main

    def setup_compiled_module(self, module):
        # Create the compiled module.
        object = self.object
        acceleration_eval = module.AccelerationEval(
            object.kernel, object.all_group.equations,
            object.particle_arrays
        )
        object.set_compiled_object(acceleration_eval)

    def compile(self, code):
        # Note, we do not add carray or particle_array as nnps_base would
        # have been rebuilt anyway if they changed.
        root = expanduser(join('~', '.pysph', 'source', get_platform_dir()))
        depends = ["pysph.base.nnps_base"]
        # Add pysph/base directory to inc_dirs for including spatial_hash.h
        # for SpatialHashNNPS
        extra_inc_dirs = [join(dirname(dirname(realpath(__file__))), 'base')]
        self._ext_mod = ExtModule(
            code, verbose=True, root=root, depends=depends,
            extra_inc_dirs=extra_inc_dirs
        )
        self._module = self._ext_mod.load()
        return self._module

    ##########################################################################
    # Mako interface.
    ##########################################################################
    def get_array_decl_for_wrapper(self):
        array_names = self.all_array_names
        decl = []
        for a_type in sorted(array_names.keys()):
            props = array_names[a_type]
            decl.append('cdef public {a_type} {attrs}'.format(
                a_type=a_type, attrs=', '.join(sorted(props))
            ))
        return '\n'.join(decl)

    def get_header(self):
        object = self.object
        helpers = []
        headers = []
        headers.extend(get_cython_code(object.kernel))
        if hasattr(object.kernel, '_get_helpers_'):
            helpers.extend(object.kernel._get_helpers_())
        # get headers from the Equations
        for equation in object.all_group.equations:
            headers.extend(get_cython_code(equation))
            if hasattr(equation, '_get_helpers_'):
                for helper in equation._get_helpers_():
                    if helper not in helpers:
                        helpers.append(helper)
        headers.extend(get_helper_code(helpers))
        # Kernel wrappers.
        cg = CythonGenerator(known_types=self.known_types)
        cg.parse(object.kernel)
        headers.append(cg.get_code())
        # Equation wrappers.
        self.known_types['SPH_KERNEL'] = KnownType(
            object.kernel.__class__.__name__
        )
        headers.append(object.all_group.get_equation_wrappers(
            self.known_types
        ))
        return '\n'.join(headers)

    def get_equation_defs(self):
        return self.object.all_group.get_equation_defs()

    def get_equation_init(self):
        return self.object.all_group.get_equation_init()

    def get_kernel_defs(self):
        return 'cdef public %s kernel' % (
            self.object.kernel.__class__.__name__
        )

    def get_kernel_init(self):
        object = self.object
        return 'self.kernel = %s(**kernel.__dict__)' % (
            object.kernel.__class__.__name__
        )

    def get_variable_declarations(self):
        group = self.object.all_group
        ctx = group.context
        return group.get_variable_declarations(ctx)

    def get_array_declarations(self):
        group = self.object.all_group
        src, dest = group.get_array_names()
        src.update(dest)
        return group.get_array_declarations(src, self.known_types)

    def get_dest_array_setup(self, dest_name, eqs_with_no_source, sources,
                             real):
        src, dest_arrays = eqs_with_no_source.get_array_names()
        for g in sources.values():
            s, d = g.get_array_names()
            dest_arrays.update(d)
        lines = ['NP_DEST = self.%s.size(real=%s)' % (dest_name, real)]
        lines += ['%s = dst.%s.data' % (n, n[2:])
                  for n in sorted(dest_arrays)]
        return '\n'.join(lines)

    def get_src_array_setup(self, src_name, eq_group):
        src_arrays, dest = eq_group.get_array_names()
        lines = ['NP_SRC = self.%s.size()' % src_name]
        lines += ['%s = src.%s.data' % (n, n[2:])
                  for n in sorted(src_arrays)]
        return '\n'.join(lines)

    def get_parallel_block(self):
        if self.config.use_openmp:
            return "with nogil, parallel():"
        else:
            return "if True: # Placeholder used for OpenMP."

    def get_parallel_range(self, start, stop=None, step=1):
        return get_parallel_range(start, stop, step)

    def get_particle_array_names(self):
        parrays = [pa.name for pa in self.object.particle_arrays]
        return ', '.join(parrays)
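
# The sketch below is illustrative only and is not part of the original
# module: it shows the typical call sequence for the helper (render the Mako
# template, compile the generated Cython code, then hand the compiled object
# back).  The ``acceleration_eval`` argument and the ``_example_usage`` name
# are assumptions made for the example; the real driver code in PySPH may
# wire these calls together differently.
def _example_usage(acceleration_eval):
    helper = AccelerationEvalCythonHelper(acceleration_eval)
    code = helper.get_code()               # render acceleration_eval_cython.mako
    module = helper.compile(code)          # build and load the extension module
    helper.setup_compiled_module(module)   # set the compiled AccelerationEval
    return helper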