def visit_Iteration(self, o):
    """Lower an Iteration node to a cgen.For loop (pragmas attached if present)."""
    body = flatten(self._visit(child) for child in o.children)

    # Resolve the loop start, folding purely numeric offset expressions.
    if o.offsets[0] != 0:
        start = str(o.limits[0] + o.offsets[0])
        try:
            # NOTE(review): eval() constant-folds numeric bounds; symbolic
            # bounds raise NameError/TypeError and stay as strings.
            start = eval(start)
        except (NameError, TypeError):
            pass
    else:
        start = o.limits[0]

    # Resolve the loop bound the same way.
    if o.offsets[1] != 0:
        end = str(o.limits[1] + o.offsets[1])
        try:
            end = eval(end)
        except (NameError, TypeError):
            pass
    else:
        end = o.limits[1]

    # Backward iterations run from `end` down to `start`.
    if o.direction == Backward:
        loop_init = 'int %s = %s' % (o.index, ccode(end))
        loop_cond = '%s >= %s' % (o.index, ccode(start))
        loop_inc = '%s -= %s' % (o.index, o.limits[2])
    else:
        loop_init = 'int %s = %s' % (o.index, ccode(start))
        loop_cond = '%s <= %s' % (o.index, ccode(end))
        loop_inc = '%s += %s' % (o.index, o.limits[2])

    # Unbounded indices are initialised and stepped alongside the main index.
    if o.uindices:
        uinit = ['%s = %s' % (i.name, ccode(i.symbolic_start)) for i in o.uindices]
        loop_init = c.Line(', '.join([loop_init] + uinit))
        ustep = ['%s = %s' % (i.name, ccode(i.symbolic_incr)) for i in o.uindices]
        loop_inc = c.Line(', '.join([loop_inc] + ustep))

    handle = c.For(loop_init, loop_cond, loop_inc, c.Block(body))

    if o.pragmas:
        handle = c.Module(o.pragmas + (handle,))

    return handle
def visit_NestedFieldEvalNode(self, node):
    """Emit C code that tries each nested field in turn until one is in bounds.

    The generated while(1) loop breaks on the first field that does not
    report ERROR_OUT_OF_BOUNDS (or after the last field regardless).
    """
    self.visit(node.fields)
    self.visit(node.args)
    sampling_args = self._check_FieldSamplingArguments(node.args.ccode)
    statements = []
    for fld in node.fields.obj:
        eval_code = fld.ccode_eval(node.var, *sampling_args)
        convert_code = fld.ccode_convert(*sampling_args)
        statements.extend([
            c.Assign("err", eval_code),
            c.Statement("%s *= %s" % (node.var, convert_code)),
            # In-bounds result found: check status and leave the loop.
            c.If("err != ERROR_OUT_OF_BOUNDS ",
                 c.Block([c.Statement("CHECKSTATUS(err)"),
                          c.Statement("break")])),
        ])
    # All fields were out of bounds: report the last error and exit.
    statements.append(c.Statement("CHECKSTATUS(err)"))
    statements.append(c.Statement("break"))
    node.ccode = c.While("1==1", c.Block(statements))
def generate(self, funcname, field_args, const_args, kernel_ast):
    """Assemble the full C source: includes, particle struct, kernel, driver loop.

    :param funcname: name of the generated kernel function to invoke
    :param field_args: mapping of field name -> field (passed as CField*)
    :param const_args: mapping of constant name -> value (passed as float)
    :param kernel_ast: cgen AST of the kernel function body
    :returns: complete C source as a string
    """
    ccode = []

    # Includes for Parcels runtime and math library.
    ccode.append(str(c.Include("parcels.h", system=False)))
    ccode.append(str(c.Include("math.h", system=False)))

    # Struct type mirroring the particle's variables.
    vdecl = [c.POD(v.dtype, v.name) for v in self.ptype.variables]
    ccode.append(str(c.Typedef(c.GenerableStruct("", vdecl, declname=self.ptype.name))))

    # The kernel itself.
    ccode.append(str(kernel_ast))

    # Driver function signature: particles array plus fields and constants.
    args = [c.Value("int", "num_particles"),
            c.Pointer(c.Value(self.ptype.name, "particles")),
            c.Value("double", "endtime"),
            c.Value("float", "dt")]
    args += [c.Pointer(c.Value("CField", "%s" % field)) for field in field_args]
    args += [c.Value("float", const) for const in const_args]
    fargs_str = ", ".join(['particles[p].time', 'sign * __dt']
                          + list(field_args.keys()) + list(const_args.keys()))

    # Time-stepping body: call the kernel, then branch on its return code.
    sign = c.Assign("sign", "dt > 0. ? 1. : -1.")
    dt_pos = c.Assign(
        "__dt", "fmin(fabs(particles[p].dt), fabs(endtime - particles[p].time))")
    body = [
        c.Assign("res", "%s(&(particles[p]), %s)" % (funcname, fargs_str)),
        c.Assign("particles[p].state", "res"),  # Store return code on particle
        c.If("res == SUCCESS",
             c.Block([c.Statement("particles[p].time += sign * __dt"),
                      dt_pos,
                      c.Statement("continue")])),
        c.If("res == REPEAT",
             c.Block([dt_pos, c.Statement("continue")]),
             c.Statement("break")),
    ]
    time_loop = c.While("__dt > __tol", c.Block(body))
    part_loop = c.For("p = 0", "p < num_particles", "++p",
                      c.Block([dt_pos, time_loop]))
    fbody = c.Block([c.Value("int", "p"),
                     c.Value("ErrorCode", "res"),
                     c.Value("double", "__dt, __tol, sign"),
                     c.Assign("__tol", "1.e-6"),
                     sign,
                     part_loop])
    fdecl = c.FunctionDeclaration(c.Value("void", "particle_loop"), args)
    ccode.append(str(c.FunctionBody(fdecl, fbody)))
    return "\n\n".join(ccode)
def _generate_kernel_func(self):
    """Build the inline kernel function AST and store it under 'KERNEL_FUNC'."""
    kernel_decl = cgen.FunctionDeclaration(
        cgen.DeclSpecifier(
            cgen.Value("void", 'k_' + self._kernel.name),
            'inline'
        ),
        self._components['KERNEL_ARG_DECLS']
    )
    kernel_body = cgen.Block([cgen.Line(self._kernel.code)])
    self._components['KERNEL_FUNC'] = cgen.FunctionBody(kernel_decl, kernel_body)
def execute_function_body(self):
    """Assemble the body of the generated execute function as a cgen.Block."""
    statements = []
    if self.profiling:
        # Initialise PAPI counters only when profiling was requested.
        statements.append(
            cgen.Assign(cgen.Value("int", "assign"), "opesci_papi_init()"))
    statements.append(self.grid.define_constants)
    statements.append(self.grid.declare_fields)
    statements.extend(self.execute_parallel_block())
    statements.append(self.grid.store_fields)
    statements.append(cgen.Statement("return 0"))
    return cgen.Block(statements)
def _generate_lib_func(self):
    """Wrap the outer loop in loop-timer instrumentation and store 'LIB_FUNC'."""
    timed_body = cgen.Block([
        self.loop_timer.get_cpp_pre_loop_code_ast(),
        self._components['LIB_OUTER_LOOP'],
        self.loop_timer.get_cpp_post_loop_code_ast(),
    ])
    declaration = cgen.FunctionDeclaration(
        cgen.Value("void", self._components['LIB_NAME']),
        self._components['LIB_ARG_DECLS'] + self._components['KERNEL_LIB_ARG_DECLS'])
    self._components['LIB_FUNC'] = cgen.FunctionBody(declaration, timed_body)
def visit_SummedVectorFieldEvalNode(self, node):
    """Emit C code evaluating each vector field of a summed field list.

    Unit conversion is applied per component unless the field uses
    C-grid velocity interpolation (already in the right units).
    """
    self.visit(node.fields)
    self.visit(node.args)
    statements_all = []
    for fld, u_var, v_var, w_var in zip(node.fields.obj, node.var,
                                        node.var2, node.var3):
        eval_code = fld.ccode_eval(u_var, v_var, w_var,
                                   fld.U, fld.V, fld.W, *node.args.ccode)
        if fld.U.interp_method != 'cgrid_velocity':
            conv_u = fld.U.ccode_convert(*node.args.ccode)
            conv_v = fld.V.ccode_convert(*node.args.ccode)
            conversions = [c.Statement("%s *= %s" % (u_var, conv_u)),
                           c.Statement("%s *= %s" % (v_var, conv_v))]
        else:
            conversions = []
        if fld.vector_type == '3D':
            conv_w = fld.W.ccode_convert(*node.args.ccode)
            conversions.append(c.Statement("%s *= %s" % (w_var, conv_w)))
        statements_all.extend([c.Assign("err", eval_code), c.Block(conversions)])
    statements_all.append(c.Statement("CHECKERROR(err)"))
    node.ccode = c.Block(statements_all)
def visit_SummedVectorFieldEvalNode(self, node):
    """Emit C code evaluating a summed vector field component by component.

    Builds a temporary VectorField per (U, V, W) triple; W may be absent,
    in which case None placeholders are zipped in.
    """
    self.visit(node.field)
    self.visit(node.args)
    statements_all = []
    w_fields = (node.field.obj.W if node.field.obj.W
                else [None] * len(node.field.obj.U))
    for U, V, W, u_var, v_var, w_var in zip(node.field.obj.U, node.field.obj.V,
                                            w_fields, node.var, node.var2,
                                            node.var3):
        vfld = VectorField(node.field.obj.name, U, V, W)
        eval_code = vfld.ccode_eval(u_var, v_var, w_var, U, V, W,
                                    *node.args.ccode)
        conv_u = U.ccode_convert(*node.args.ccode)
        conv_v = V.ccode_convert(*node.args.ccode)
        conversions = [c.Statement("%s *= %s" % (u_var, conv_u)),
                       c.Statement("%s *= %s" % (v_var, conv_v))]
        if w_var:
            conv_w = W.ccode_convert(*node.args.ccode)
            conversions.append(c.Statement("%s *= %s" % (w_var, conv_w)))
        statements_all.extend([c.Assign("err", eval_code),
                               c.Block(conversions),
                               c.Statement("CHECKERROR(err)")])
    node.ccode = c.Block(statements_all)
def _generate_lib_outer_loop(self):
    """Create the per-local-element outer loop: gather, inner loop, scatter."""
    index = self._components['LIB_PAIR_INDEX_0']
    loop_body = cgen.Block([
        self._components['LIB_KERNEL_GATHER'],
        self._components['LIB_INNER_LOOP'],
        self._components['LIB_KERNEL_SCATTER'],
    ])
    self._components['LIB_OUTER_LOOP'] = cgen.For(
        'int ' + index + '=0', index + '<_N_LOCAL', index + '++', loop_body)
def _dump_storage(self, iet, storage):
    """Rebuild `iet` so every entry in `storage` gets its allocs/frees emitted.

    Parallel (per-thread) allocations and frees are wrapped in a region
    header with a thread-num initializer; plain allocs/frees are flattened
    around them.
    """
    mapper = {}
    for k, v in storage.items():
        # Expressions map straight through (Expr -> LocalExpr).
        if k.is_Expression:
            mapper[k] = v
            continue

        # Allocations: plain ones first, then one Module per thread id.
        allocs = flatten(v.allocs)
        for tid, body in as_mapper(v.pallocs, itemgetter(0), itemgetter(1)).items():
            header = self.lang.Region._make_header(tid.symbolic_size)
            init = c.Initializer(c.Value(tid._C_typedata, tid.name),
                                 self.lang['thread-num'])
            allocs.append(c.Module((header, c.Block([init] + body))))
        if allocs:
            allocs.append(c.Line())

        # Frees: per-thread Modules first, then the plain frees.
        frees = []
        for tid, body in as_mapper(v.pfrees, itemgetter(0), itemgetter(1)).items():
            header = self.lang.Region._make_header(tid.symbolic_size)
            init = c.Initializer(c.Value(tid._C_typedata, tid.name),
                                 self.lang['thread-num'])
            frees.append(c.Module((header, c.Block([init] + body))))
        frees.extend(flatten(v.frees))
        if frees:
            frees.insert(0, c.Line())

        mapper[k] = k._rebuild(
            body=List(header=allocs, body=k.body, footer=frees),
            **k.args_frozen)

    return Transformer(mapper, nested=True).visit(iet)
def visit_FieldEvalNode(self, node):
    """Emit C code for a field evaluation, with the UV two-component case
    handled separately (both components converted individually)."""
    self.visit(node.field)
    self.visit(node.args)
    if node.var2:
        # Evaluation of a UV Field: convert U and V components separately.
        eval_code = node.field.obj.ccode_evalUV(node.var, node.var2,
                                                *node.args.ccode)
        conv_u = node.field.obj.fieldset.U.ccode_convert(*node.args.ccode)
        conv_v = node.field.obj.fieldset.V.ccode_convert(*node.args.ccode)
        conversion = c.Block([
            c.Statement("%s *= %s" % (node.var, conv_u)),
            c.Statement("%s *= %s" % (node.var2, conv_v)),
        ])
    else:
        eval_code = node.field.obj.ccode_eval(node.var, *node.args.ccode)
        conv = node.field.obj.ccode_convert(*node.args.ccode)
        conversion = c.Statement("%s *= %s" % (node.var, conv))
    node.ccode = c.Block([c.Assign("err", eval_code),
                          conversion,
                          c.Statement("CHECKERROR(err)")])
def visit_FieldEvalNode(self, node):
    """Emit C code evaluating a field, optionally applying unit conversion."""
    self.visit(node.field)
    self.visit(node.args)
    eval_code = node.field.obj.ccode_eval(node.var, *node.args.ccode)
    statements = [c.Assign("err", eval_code)]
    if node.convert:
        # Multiply the sampled value by the field's unit-conversion factor.
        conv = node.field.obj.ccode_convert(*node.args.ccode)
        statements.append(c.Statement("%s *= %s" % (node.var, conv)))
    statements.append(c.Statement("CHECKSTATUS(err)"))
    node.ccode = c.Block(statements)
def visit_SummedFieldEvalNode(self, node):
    """Emit C code evaluating and converting every field of a summed field."""
    self.visit(node.fields)
    self.visit(node.args)
    statements = []
    for fld, var in zip(node.fields.obj, node.var):
        eval_code = fld.ccode_eval(var, *node.args.ccode)
        conv = fld.ccode_convert(*node.args.ccode)
        statements.extend([
            c.Assign("err", eval_code),
            c.Statement("%s *= %s" % (var, conv)),
            c.Statement("CHECKSTATUS(err)"),
        ])
    node.ccode = c.Block(statements)
def run_dependencies():
    """Go through all dependencies, find indexes for each dependency and put
    each created dependency into c.Statement

    :return: c.Block containing all statements
    :raises KeyError: if a dependency's ``mix_in`` is not 'random' or 'num_val'
    """
    block_with_statements = []
    for dependency_name, arrays in dependencies.items():
        if not arrays:
            continue
        for array in arrays:
            array_name = array['array_name']
            distances = array['distance']
            optimize = array['optimize']
            mix_in = array['mix_in']
            if mix_in not in ('random', 'num_val'):
                raise KeyError("Mix_in can be only 'random' or 'num_val'")
            for arr_name, arr_size in all_arrays.items():
                if array_name != arr_name:
                    continue
                dest_array = array_name
                # One source expression per distance column (first column is
                # the destination offset).
                src_array = [array_name for _ in range(len(distances[0]) - 1)]
                for index in range(len(arr_size)):
                    distance = distances[index]
                    # Format the destination offset: '', '-k' or '+k'.
                    if distance[0] == 0:
                        dest_dist = ''
                    elif str(distance[0])[0] == '-':
                        dest_dist = str(distance[0])
                    else:
                        dest_dist = '+' + str(distance[0])
                    for i in range(1, len(distances[0])):
                        # Same formatting for each source offset.
                        if distance[i] == 0:
                            src_dist = ''
                        elif str(distance[i])[0] == '-':
                            src_dist = str(distance[i])
                        else:
                            src_dist = '+' + str(distance[i])
                        src_array[i - 1] += f'[{generate_loop_index(index % loop_nest_level)}{src_dist}]'
                    dest_array += f'[{generate_loop_index(index % loop_nest_level)}{dest_dist}]'
                # Join the source terms with random arithmetic operators;
                # the trailing operator is stripped below.
                src_array_str = ''
                for src in src_array:
                    src_array_str += src + random.choice(maths_operations)
                stmt = dependency_function[dependency_name](
                    dest_array, src_array_str[:-1], optimize, mix_in)
                if stmt:
                    block_with_statements.append(
                        c.Statement('\n' + add_indent() + stmt))
    return c.Block(block_with_statements)
def __generate_string_methods(self):
    """Emit pack_string/unpack_string marshalling helpers into header+source.

    Strings are serialised as a uint8_t length prefix followed by raw bytes.
    """
    # pack_string: length byte, then each character.
    declaration = c.FunctionDeclaration(c.Value('void', 'pack_string'), [
        self._data_object_ref(self._packet_type(), 'packet'),
        self._data_cref(self.get_dtype('string'), 'str')
    ])
    pack_body = c.Block([
        c.Statement('uint8_t size = str.size()'),
        c.Statement('*packet << static_cast<uint8_t>(size)'),
        c.For('uint8_t i = 0', 'i < size', '++i', c.Block([
            c.Statement('*packet << static_cast<uint8_t>(str[i])')
        ]))
    ])
    self.marshal.header.append(declaration)
    self.marshal.source.append(c.FunctionBody(declaration, pack_body))

    # unpack_string: guard the length byte, then guard and read the payload.
    declaration = c.FunctionDeclaration(c.Value('bool', 'unpack_string'), [
        self._pointer_type('PacketReader', 'packet'),
        self._data_object_ref(self.get_dtype('string'), 'str')
    ])
    unpack_body = c.Block([
        WrapUnpack._guard('sizeof(uint8_t)'),
        c.Statement('uint8_t size = packet->peek<uint8_t>()'),
        WrapUnpack._guard('size'),
        c.Statement('str = packet->read<std::string>()'),
        c.Statement('return true')
    ])
    self.marshal.header.append(declaration)
    self.marshal.source.append(c.FunctionBody(declaration, unpack_body))
def left_contractions(self, pos):
    """Generates the code computing the left-contraction part of the
    optimization matrix for site nr. `pos`

    :param pos: The local tensor to copy (should be `< len(X)`)
    :returns: List containing cgen Statements
    """
    if pos == 0:
        return [c.Statement('left_c[0] = 1')]

    contract_ltens_with_a = 'dgemv(blasNoTranspose, x_shared, current_row + {offset:d}, {dim_out:d}, {dim_in:d}, {target:})'

    result = self.copy_ltens_to_share(0)
    result += [c.Line()]
    first = contract_ltens_with_a.format(offset=0,
                                         dim_out=self._ranks[0],
                                         dim_in=self._dims[0],
                                         target='left_c')
    # We need to check this every time and can't simply return since
    # otherwise __syncthreads crashes
    result += [c.If('mid < %i' % self._meas, c.Statement(first))]

    for site in range(1, pos):
        result += self.copy_ltens_to_share(site)
        result += [c.Line()]
        # Since we assume A to consist of product measurements
        contract = c.Statement(contract_ltens_with_a.format(
            offset=sum(self._dims[:site]),
            dim_out=self._ranks[site - 1] * self._ranks[site],
            dim_in=self._dims[site],
            target='tmat_c'))
        propagate = c.Statement(
            'dgemv(blasTranspose, tmat_c, left_c, {rank_l}, {rank_r}, buf_c)'
            .format(rank_l=self._ranks[site - 1], rank_r=self._ranks[site]))
        writeback = c.Statement(
            'memcpy(left_c, buf_c, sizeof({ctype}) * {rank_r})'
            .format(ctype=c.dtype_to_ctype(self._dtype),
                    rank_r=self._ranks[site]))
        result += [
            c.If('mid < %i' % self._meas,
                 c.Block([contract, propagate, writeback])),
            c.Line()
        ]
    return result
def execute_time_loop(self):
    """Build the outer time-stepping loop, optionally bracketed by PLUTO
    scop/endscop pragmas, as a cgen.For over `_ti`."""
    statements = [self.grid.time_stepping]
    if self.pluto:
        statements.append(cgen.Pragma("scop"))
    statements.append(self.grid.primary_loop)
    if self.pluto:
        statements.append(cgen.Pragma("endscop"))
    output_step = self.grid.output_step
    if output_step:
        statements.append(output_step)
    return cgen.For(cgen.InlineInitializer(cgen.Value("int", "_ti"), 0),
                    "_ti < ntsteps", "_ti++",
                    cgen.Block(statements))
def visit_NestedVectorFieldEvalNode(self, node):
    """Emit C code trying each nested vector field until one is in bounds.

    Generates a while(1) loop that breaks on the first field not reporting
    ERROR_OUT_OF_BOUNDS (or after the last field regardless).
    """
    self.visit(node.fields)
    self.visit(node.args)
    statements_all = []
    for fld in node.fields.obj:
        eval_code = fld.ccode_eval(node.var, node.var2, node.var3,
                                   fld.U, fld.V, fld.W, *node.args.ccode)
        if fld.U.interp_method != 'cgrid_velocity':
            conv_u = fld.U.ccode_convert(*node.args.ccode)
            conv_v = fld.V.ccode_convert(*node.args.ccode)
            conversions = [c.Statement("%s *= %s" % (node.var, conv_u)),
                           c.Statement("%s *= %s" % (node.var2, conv_v))]
        else:
            conversions = []
        if fld.vector_type == '3D':
            conv_w = fld.W.ccode_convert(*node.args.ccode)
            conversions.append(c.Statement("%s *= %s" % (node.var3, conv_w)))
        statements_all += [
            c.Assign("err", eval_code),
            c.Block(conversions),
            # In-bounds: check status and leave the loop.
            c.If("err != ERROR_OUT_OF_BOUNDS ",
                 c.Block([c.Statement("CHECKERROR(err)"),
                          c.Statement("break")])),
        ]
    # All fields out of bounds: report the last error and exit.
    statements_all += [c.Statement("CHECKERROR(err)"), c.Statement("break")]
    node.ccode = c.While("1==1", c.Block(statements_all))
def eval(self, generator):
    """Emit C++ packing code for this field.

    Optional fields are serialised as a presence flag followed by the
    dereferenced value (only when present).
    """
    block = c.Collection()
    raw_variable = f'data.{self.base.name.eval()}'
    if not self.base.optional:
        block.append(self._pack(generator, raw_variable))
        return block
    # Presence flag first, then the value guarded by the same test.
    block.append(
        c.Statement(f'*packet << static_cast<bool>({raw_variable})'))
    block.append(
        c.If(raw_variable,
             c.Block([self._pack(generator, f'*{raw_variable}')])))
    return block
def right_contractions(self, pos):
    """Generates the code computing the right-contraction part of the
    optimization matrix for site nr. `pos`

    :param pos: The local tensor to copy (should be `< len(X)`)
    :returns: List containing cgen Statements
    """
    if pos == self._sites - 1:
        return [c.Statement('right_c[0] = 1')]

    contract_ltens_with_a = 'dgemv(blasNoTranspose, x_shared, current_row + {offset:d}, {dim_out:d}, {dim_in:d}, {target:})'

    result = self.copy_ltens_to_share(self._sites - 1)
    result += [c.Line()]
    first = contract_ltens_with_a.format(offset=sum(self._dims[:-1]),
                                         dim_out=self._ranks[-1],
                                         dim_in=self._dims[-1],
                                         target='right_c')
    result += [c.If('mid < %i' % self._meas, c.Statement(first))]

    for site in range(self._sites - 2, pos, -1):
        result += self.copy_ltens_to_share(site)
        result += [c.Line()]
        # Since we assume A to consist of product measurements
        contract = c.Statement(contract_ltens_with_a.format(
            offset=sum(self._dims[:site]),
            dim_out=self._ranks[site - 1] * self._ranks[site],
            dim_in=self._dims[site],
            target='tmat_c'))
        propagate = c.Statement(
            'dgemv(blasNoTranspose, tmat_c, right_c, {rank_l}, {rank_r}, buf_c)'
            .format(rank_l=self._ranks[site - 1], rank_r=self._ranks[site]))
        writeback = c.Statement(
            'memcpy(right_c, buf_c, sizeof({ctype}) * {rank_l})'
            .format(ctype=c.dtype_to_ctype(self._dtype),
                    rank_l=self._ranks[site - 1]))
        result += [
            c.If('mid < %i' % self._meas,
                 c.Block([contract, propagate, writeback])),
            c.Line()
        ]
    return result
def visit_Iteration(self, o):
    """Lower an Iteration node to a cgen.For loop (older offset/reverse form)."""
    body = flatten(self.visit(child) for child in o.children)

    # Resolve the loop start; numeric expressions are constant-folded.
    if o.offsets[0] != 0:
        start = "%s + %s" % (o.limits[0], -o.offsets[0])
        try:
            # NOTE(review): eval() folds numeric bounds only; symbolic ones
            # raise NameError/TypeError and are kept as strings.
            start = eval(start)
        except (NameError, TypeError):
            pass
    else:
        start = o.limits[0]

    # Resolve the (exclusive) loop bound the same way.
    if o.offsets[1] != 0:
        end = "%s - %s" % (o.limits[1], o.offsets[1])
        try:
            end = eval(end)
        except (NameError, TypeError):
            pass
    else:
        end = o.limits[1]

    # Reverse dimensions iterate from end-1 down to start.
    if o.reverse:
        loop_init = 'int %s = %s' % (o.index, ccode('%s - 1' % end))
        loop_cond = '%s >= %s' % (o.index, ccode(start))
        loop_inc = '%s -= %s' % (o.index, o.limits[2])
    else:
        loop_init = 'int %s = %s' % (o.index, ccode(start))
        loop_cond = '%s < %s' % (o.index, ccode(end))
        loop_inc = '%s += %s' % (o.index, o.limits[2])

    # Unbounded indices are initialised and stepped alongside the main index.
    if o.uindices:
        uinit = ['%s = %s' % (i.index, ccode(i.start)) for i in o.uindices]
        loop_init = c.Line(', '.join([loop_init] + uinit))
        ustep = ['%s = %s' % (i.index, ccode(i.step)) for i in o.uindices]
        loop_inc = c.Line(', '.join([loop_inc] + ustep))

    handle = c.For(loop_init, loop_cond, loop_inc, c.Block(body))

    if o.pragmas:
        handle = c.Module(o.pragmas + (handle,))

    return handle
def _pack(self, generator, variable):
    """Return C++ packing code for `variable`; vectors get a size prefix
    and a ranged-for over their elements."""
    dtype = self.base.dtype.dtype.eval()
    if dtype != 'vector':
        # Scalar: trivial types stream directly, others use a pack_<type> helper.
        if generator.is_trivial(dtype):
            return c.Statement(f'*packet << {variable}')
        return c.Statement(f'pack_{dtype}(packet, {variable})')

    spec = self.base.dtype.spec.eval()
    if generator.is_trivial(spec):
        element = c.Statement('*packet << x')
    else:
        element = c.Statement(f'pack_{spec}(packet, x)')
    return c.Collection([
        c.Statement(f'*packet << static_cast<uint8_t>(({variable}).size())'),
        CppRangedFor('auto& x', variable, c.Block([element]))
    ])
def visit_FunctionDef(self, node):
    """Lower a Python kernel function to a static inline C function,
    storing the result on node.ccode."""
    # Generate "ccode" attributes by traversing the Python AST.
    for stmt in node.body:
        self.visit(stmt)

    # Declaration: static inline KernelOp <name>(particle*, time, dt, fields...).
    decl = c.Static(c.DeclSpecifier(c.Value("KernelOp", node.name),
                                    spec='inline'))
    args = [c.Pointer(c.Value(self.ptype.name, "particle")),
            c.Value("double", "time"),
            c.Value("float", "dt")]
    for field in self.field_args:
        args.append(c.Pointer(c.Value("CField", "%s" % field)))

    # Body: the lowered statements plus an implicit success return.
    body = [stmt.ccode for stmt in node.body]
    body.append(c.Statement("return SUCCESS"))
    node.ccode = c.FunctionBody(c.FunctionDeclaration(decl, args),
                                c.Block(body))
def convergence_function(self):
    """Build the extern "C" opesci_convergence(grid, conv) function body."""
    statements = [
        self.grid.define_constants,
        self.grid.load_fields,
        self.grid.converge_test,
        cgen.Statement("return 0"),
    ]
    declaration = cgen.FunctionDeclaration(
        cgen.Value('int', 'opesci_convergence'),
        [cgen.Pointer(cgen.Value(self._grid_structure_name, "grid")),
         cgen.Pointer(cgen.Value(self.__convergence_structure_name, "conv"))])
    return cgen.FunctionBody(cgen.Extern("C", declaration),
                             cgen.Block(statements))
def _pack(self, generator, variable):
    """Return C# packing code for `variable`; vectors get a count prefix
    and a foreach over their elements."""
    dtype = self.base.dtype.dtype.eval()
    if dtype != 'vector':
        # Scalar: trivial types use the typed writer, others a pack_<type> helper.
        if generator.is_trivial(dtype):
            return c.Statement(
                f'packet.Data.Write{WRITE_MAP[dtype]}({variable})')
        return c.Statement(f'pack_{dtype}(packet, {variable})')

    spec = self.base.dtype.spec.eval()
    if generator.is_trivial(spec):
        element = c.Statement(f'packet.Data.Write{WRITE_MAP[spec]}(x)')
    else:
        element = c.Statement(f'pack_{spec}(packet, x)')
    return c.Collection([
        c.Statement(f'packet.Data.WriteInt8((byte)(({variable}).Count))'),
        CSharpRangedFor('var x', variable, c.Block([element]))
    ])
def ccode(self):
    """Generate C code for the represented stencil loop

    :returns: :class:`cgen.For` object representing the loop
    """
    # Direction is inferred from the limits: ascending when end >= start.
    forward = self.limits[1] >= self.limits[0]
    loop_init = cgen.InlineInitializer(cgen.Value("int", self.index),
                                       self.limits[0])
    loop_cond = '%s %s %s' % (self.index,
                              '<' if forward else '>',
                              self.limits[1])
    # Unit strides use ++/--; other strides use +=/-=.
    if self.limits[2] == 1:
        loop_inc = '%s%s' % (self.index, '++' if forward else '--')
    else:
        loop_inc = '%s %s %s' % (self.index,
                                 '+=' if forward else '-=',
                                 self.limits[2])
    loop_body = cgen.Block([s.ccode for s in self.expressions])
    return cgen.For(loop_init, loop_cond, loop_inc, loop_body)
def eval(self, generator):
    """Emit C# packing code for this field.

    Optional (nullable) fields are serialised as a HasValue byte followed
    by the unwrapped value (only when present).
    """
    block = c.Collection()
    raw_variable = f'data.{self.base.name.eval()}'
    if not self.base.optional:
        block.append(self._pack(generator, raw_variable))
        return block
    # Presence byte first, then the value guarded by HasValue.
    block.append(c.Statement(
        f'packet.Data.WriteByte((byte)Convert.ToInt32({raw_variable}.HasValue))'))
    block.append(c.If(
        f'{raw_variable}.HasValue',
        c.Block([self._pack(generator, f'{raw_variable}.Value')])))
    return block
def visit_FunctionDef(self, node):
    """Lower a kernel FunctionDef to a static inline C function.

    Collects CField* arguments for each field (expanding the special 'UV'
    pair into U/V and the curvilinear rotation fields) and float arguments
    for each constant; docstring statements are skipped.

    :raises RuntimeError: if the grid is curvilinear but the rotation
        fields (cosU, sinU, cosV, sinV) are missing from the fieldset.
    """
    # Generate "ccode" attribute by traversing the Python AST
    for stmt in node.body:
        if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str):  # ignore docstrings
            self.visit(stmt)

    # Create function declaration and argument list
    decl = c.Static(
        c.DeclSpecifier(c.Value("ErrorCode", node.name), spec='inline'))
    args = [
        c.Pointer(c.Value(self.ptype.name, "particle")),
        c.Value("double", "time"),
        c.Value("float", "dt")
    ]
    for field_name, field in self.field_args.items():
        if field_name != 'UV':
            args += [c.Pointer(c.Value("CField", "%s" % field_name))]
    for field_name, field in self.field_args.items():
        if field_name == 'UV':
            fieldset = field.fieldset
            for f in ['U', 'V', 'cosU', 'sinU', 'cosV', 'sinV']:
                try:
                    getattr(fieldset, f)
                    if f not in self.field_args:
                        args += [c.Pointer(c.Value("CField", "%s" % f))]
                # BUGFIX: was a bare `except:`; only a missing attribute on
                # the fieldset is the expected failure here.
                except AttributeError:
                    if fieldset.U.grid.gtype in [
                            GridCode.CurvilinearZGrid,
                            GridCode.CurvilinearSGrid
                    ]:
                        raise RuntimeError(
                            "cosU, sinU, cosV and sinV fields must be defined for a proper rotation of U, V fields in curvilinear grids")
    for const, _ in self.const_args.items():
        args += [c.Value("float", const)]

    # Create function body as C-code object
    body = [
        stmt.ccode for stmt in node.body
        if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str)
    ]
    body += [c.Statement("return SUCCESS")]
    node.ccode = c.FunctionBody(c.FunctionDeclaration(decl, args),
                                c.Block(body))
def visit_FunctionDef(self, node):
    """Lower a kernel FunctionDef to a static inline C function.

    Collects CField* arguments for plain fields and for the U/V/W components
    of vector fields, plus float arguments for constants; docstring
    statements are skipped.

    :raises RuntimeError: if a U or V component needed by a VectorField is
        not present on the fieldset (a missing W is tolerated).
    """
    # Generate "ccode" attribute by traversing the Python AST
    for stmt in node.body:
        if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str):  # ignore docstrings
            self.visit(stmt)

    # Create function declaration and argument list
    decl = c.Static(
        c.DeclSpecifier(c.Value("ErrorCode", node.name), spec='inline'))
    args = [
        c.Pointer(c.Value(self.ptype.name, "particle")),
        c.Value("double", "time"),
        c.Value("float", "dt")
    ]
    for field_name, field in self.field_args.items():
        args += [c.Pointer(c.Value("CField", "%s" % field_name))]
    for field_name, field in self.vector_field_args.items():
        fieldset = field.fieldset
        Wname = field.W.name if field.W else 'not_defined'
        for f in [field.U.name, field.V.name, Wname]:
            try:
                # Next line will break for example if field.U was created
                # but not added to the fieldset
                getattr(fieldset, f)
                if f not in self.field_args:
                    args += [c.Pointer(c.Value("CField", "%s" % f))]
            # BUGFIX: was a bare `except:`; only a missing attribute on the
            # fieldset is the expected failure here.
            except AttributeError:
                if f != Wname:
                    raise RuntimeError(
                        "Field %s needed by a VectorField but it does not exist"
                        % f)
                # A missing W component is fine (2D vector field).
    for const, _ in self.const_args.items():
        args += [c.Value("float", const)]

    # Create function body as C-code object
    body = [
        stmt.ccode for stmt in node.body
        if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str)
    ]
    body += [c.Statement("return SUCCESS")]
    node.ccode = c.FunctionBody(c.FunctionDeclaration(decl, args),
                                c.Block(body))
def gen_op_kernel_class_defn(class_name):
    """Return cgen lines declaring an OpKernel subclass named `class_name`
    with a constructor and a Compute function."""
    members = [c.Line("public:")]
    members.extend(gen_op_kernel_constructor(class_name))
    members.append(c.Line())
    members.extend(gen_op_kernel_compute_fn())

    class_defn = [
        c.Line("class " + class_name + ": public OpKernel"),
        c.Block(members),
        # TODO: fix this at some point — cgen.Block has no trailing
        # semicolon, so it is emitted as a separate line.
        c.Line(";"),
    ]
    return class_defn