def _datadesc(obj: Any):
    from dace import data
    if isinstance(obj, data.Data):
        return obj
    elif symbolic.issymbolic(obj):
        return data.Scalar(symbolic.symtype(obj))
    elif isinstance(obj, dtypes.typeclass):
        return data.Scalar(obj)
    return data.Scalar(dtypes.typeclass(type(obj)))
def testExpressionAssignment(self):
    code_str = "res = 5 + 3.1"
    symbols = type_inference.infer_types(code_str)
    self.assertEqual(symbols["res"], dtypes.typeclass(float))

    code_str = "res = 5 + 1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["res"], dtypes.typeclass(int))

    # use already defined symbol
    code_str = "res2 = 1 + res"
    symbols = type_inference.infer_types(code_str, symbols)
    self.assertEqual(symbols["res2"], dtypes.typeclass(float))

    code_str = "res3 = 1 + int(res*res2)"
    symbols = type_inference.infer_types(code_str, symbols)
    self.assertEqual(symbols["res3"], dtypes.typeclass(int))
def testDefaultDataTypes(self):
    # check that the configured default data types are enforced
    config_data_types = Config.get('compiler', 'default_data_types')

    code_str = """value1 = 10
value2 = 3.14
value3 = 5000000000"""
    inf_symbols = type_inference.infer_types(code_str)
    if config_data_types.lower() == "python":
        self.assertEqual(inf_symbols["value1"], dtypes.typeclass(np.int64))
        self.assertEqual(inf_symbols["value2"], dtypes.typeclass(np.float64))
    elif config_data_types.lower() == "c":
        self.assertEqual(inf_symbols["value1"], dtypes.typeclass(np.int32))
        self.assertEqual(inf_symbols["value2"], dtypes.typeclass(np.float32))
    # in any case, value3 needs uint64
    self.assertEqual(inf_symbols["value3"], dtypes.typeclass(np.uint64))
def create_datadescriptor(obj):
    """ Creates a data descriptor from various types of objects.
        @see: dace.data.Data
    """
    from dace import dtypes  # Avoiding import loops
    if isinstance(obj, Data):
        return obj

    try:
        return obj.descriptor
    except AttributeError:
        if isinstance(obj, numpy.ndarray):
            return Array(dtype=dtypes.typeclass(obj.dtype.type), shape=obj.shape)
        if symbolic.issymbolic(obj):
            return Scalar(symbolic.symtype(obj))
        if isinstance(obj, dtypes.typeclass):
            return Scalar(obj)
        return Scalar(dtypes.typeclass(type(obj)))
def testInferExpr(self):
    code_str = "5 + 3.5"
    inf_type = type_inference.infer_expr_type(code_str)
    self.assertEqual(inf_type, dtypes.typeclass(float))

    prev_symbols = {"n": dtypes.typeclass(int)}
    code_str = "5 + n"
    inf_type = type_inference.infer_expr_type(code_str, prev_symbols)
    self.assertEqual(inf_type, dtypes.typeclass(int))

    # invalid code
    code_str = "a = 5 + 3.5"
    self.assertRaises(TypeError, lambda: type_inference.infer_expr_type(code_str))

    prev_symbols = {"ul": dtypes.typeclass(float)}
    code_str = "min(ul, 0)"
    inf_type = type_inference.infer_expr_type(code_str, prev_symbols)
    self.assertEqual(inf_type, dtypes.typeclass(float))
def _Name(t, symbols, inferred_symbols):
    if t.id in cppunparse._py2c_reserved:
        return dtypes.typeclass(np.result_type(t.id))
    else:
        # Check whether this name is a Python type, a defined symbol, or a locally
        # inferred symbol. If so, take its type.
        inferred_type = None

        # If this is a statement generated from a tasklet with a dynamic memlet,
        # it could have a leading '*' (pointer)
        t_id = t.id[1:] if t.id.startswith('*') else t.id

        if t_id.strip("()") in cppunparse._py2c_typeconversion:
            inferred_type = cppunparse._py2c_typeconversion[t_id.strip("()")]
        elif t_id in symbols:
            # Defined symbols may carry numpy dtypes; convert those to typeclass
            inferred_type = symbols[t_id]
            if isinstance(inferred_type, np.dtype):
                inferred_type = dtypes.typeclass(inferred_type.type)
        elif t_id in inferred_symbols:
            inferred_type = inferred_symbols[t_id]
        return inferred_type
def testVarious(self):
    # code snippets that contain constructs not directly involved in type inference
    # (borrowed from astunparse tests)

    while_code = """def g():
    while True:
        break
    z = 3
"""
    inf_symbols = type_inference.infer_types(while_code)
    self.assertEqual(inf_symbols["z"], dtypes.typeclass(int))

    raise_from_code = """try:
    1 / 0
except ZeroDivisionError as e:
    raise ArithmeticError from e
"""
    inf_symbols = type_inference.infer_types(raise_from_code)

    try_except_finally_code = """try:
    suite1
except ex1:
    suite2
except ex2:
    suite3
else:
    suite4
finally:
    suite5
"""
    inf_symbols = type_inference.infer_types(try_except_finally_code)

    # function def with arguments
    function_def_return_code = """def f(arg : float):
    res = 5 + arg
    return res
"""
    inf_symbols = type_inference.infer_types(function_def_return_code)
    self.assertEqual(inf_symbols["res"], dtypes.typeclass(float))
    self.assertEqual(inf_symbols["arg"], dtypes.typeclass(float))
def get_internal_symbols() -> dict:
    """
    Generates all internal symbols by crossing the internal function names with all
    possible type suffixes. Then defines the symbol with the corresponding return type
    (based on the suffix).
    """
    res = {}

    for func, type in itertools.product(FUSED_OPERATION_TO_SVE, TYPE_TO_SVE_SUFFIX):
        res[f'{func}_{TYPE_TO_SVE_SUFFIX[type.type if isinstance(type, dace.dtypes.typeclass) else type]}'] = dtypes.vector(
            type if isinstance(type, dtypes.typeclass) else dtypes.typeclass(type), SVE_LEN)
    return res
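# Illustrative sketch of the result (the concrete keys depend on the contents of
# FUSED_OPERATION_TO_SVE and TYPE_TO_SVE_SUFFIX; the entry below is hypothetical):
#
#   get_internal_symbols()  # -> {'__svmad_f32': dtypes.vector(dace.float32, SVE_LEN), ...}
#
# i.e., each internal function name is combined with every type suffix and mapped to an
# SVE-length vector of the corresponding scalar type.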
def _infer_dtype(t: Union[ast.Name, ast.Attribute]): name = dace.frontend.python.astutils.rname(t) if '.' in name: dtype_str = name[name.rfind('.') + 1:] else: dtype_str = name dtype = getattr(dtypes, dtype_str, False) if isinstance(dtype, dtypes.typeclass): return dtype if isinstance(dtype, np.dtype): return dtypes.typeclass(dtype.type) return None
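# Illustrative behavior (a sketch, not part of the original module): an ast.Attribute
# node for `dace.float32` resolves to dtypes.float32, a bare ast.Name such as `int64`
# resolves to dtypes.int64, and a name that is not an attribute of `dtypes` yields None.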
def create_datadescriptor(obj):
    """ Creates a data descriptor from various types of objects.
        @see: dace.data.Data
    """
    from dace import dtypes  # Avoiding import loops
    if isinstance(obj, Data):
        return obj
    elif hasattr(obj, '__descriptor__'):
        return obj.__descriptor__()
    elif hasattr(obj, 'descriptor'):
        return obj.descriptor
    elif isinstance(obj, (list, tuple, numpy.ndarray)):
        if isinstance(obj, (list, tuple)):  # Lists and tuples are cast to numpy
            obj = numpy.array(obj)

        if obj.dtype.fields is not None:  # Struct
            dtype = dtypes.struct('unnamed',
                                  **{
                                      k: dtypes.typeclass(v[0].type)
                                      for k, v in obj.dtype.fields.items()
                                  })
        else:
            dtype = dtypes.typeclass(obj.dtype.type)
        return Array(dtype=dtype,
                     strides=tuple(s // obj.itemsize for s in obj.strides),
                     shape=obj.shape)
    elif symbolic.issymbolic(obj):
        return Scalar(symbolic.symtype(obj))
    elif isinstance(obj, dtypes.typeclass):
        return Scalar(obj)
    elif obj in {int, float, complex, bool, None}:
        return Scalar(dtypes.typeclass(obj))
    elif callable(obj):
        # Cannot determine return value/argument types from function object
        return Scalar(dtypes.callback(None))
    return Scalar(dtypes.typeclass(type(obj)))
def generate_constants(self, sdfg: SDFG, callsite_stream: CodeIOStream):
    # Write constants
    for cstname, cstval in sdfg.constants.items():
        if isinstance(cstval, np.ndarray):
            dtype = dtypes.typeclass(cstval.dtype.type)
            const_str = "constexpr " + dtype.ctype + \
                        " " + cstname + "[" + str(cstval.size) + "] = {"
            it = np.nditer(cstval, order='C')
            for i in range(cstval.size - 1):
                const_str += str(it[0]) + ", "
                it.iternext()
            const_str += str(it[0]) + "};\n"
            callsite_stream.write(const_str, sdfg)
        else:
            callsite_stream.write(
                "constexpr auto %s = %s;\n" % (cstname, str(cstval)), sdfg)
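# Illustrative output (a sketch; the constant name and values are hypothetical):
# for a constant 'cst' holding np.array([1, 2, 3], dtype=np.int32), the generated
# line would read roughly
#
#   constexpr int cst[3] = {1, 2, 3};
#
# Non-array constants instead fall through to the 'constexpr auto' branch.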
def _Call(t, symbols, inferred_symbols):
    inf_type = _dispatch(t.func, symbols, inferred_symbols)

    # Dispatch the arguments and determine their types
    arg_types = [_dispatch(e, symbols, inferred_symbols) for e in t.args]
    for e in t.keywords:
        _dispatch(e, symbols, inferred_symbols)

    # If the function symbol is known, always return the defined type
    if inf_type:
        return inf_type

    # In case of a typeless math function, determine the return type based on the arguments
    name = dace.frontend.python.astutils.rname(t)
    idx = name.rfind('.')
    if idx > -1:
        module = name[:name.rfind('.')]
    else:
        module = ''
    if module == 'math':
        return dtypes.result_type_of(arg_types[0], *arg_types)

    # Reading from an Intel channel returns the channel type
    if name == 'read_channel_intel':
        return arg_types[0]

    if name in ('abs', 'log'):
        return arg_types[0]
    if name in ('min', 'max'):  # binary math operations that do not exist in the math module
        return dtypes.result_type_of(arg_types[0], *arg_types)
    if name in ('round', ):
        return dtypes.typeclass(int)

    # dtypes (dace.int32, np.float64) can be used as functions
    inf_type = _infer_dtype(t)
    if inf_type:
        return inf_type

    # In any other case simply return None
    return None
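# Illustrative behavior (a sketch, not part of the original module): with arg_types
# inferred as [dtypes.float32], `math.exp(x)` takes the 'math' branch and yields
# dtypes.float32; `min(a, b)` yields the common result type of its arguments;
# `round(x)` always yields the default integer typeclass; and `dace.int32(x)` is
# resolved through _infer_dtype.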
def unparse_tasklet(self, sdfg: SDFG, dfg: state.StateSubgraphView, state_id: int, node: nodes.Node,
                    function_stream: CodeIOStream, callsite_stream: CodeIOStream):
    state_dfg: SDFGState = sdfg.nodes()[state_id]

    callsite_stream.write('\n///////////////////')
    callsite_stream.write(f'// Tasklet code ({node.label})')

    # Determine all defined symbols for the Unparser (for inference)

    # Constants and other defined symbols
    defined_symbols = state_dfg.symbols_defined_at(node)
    defined_symbols.update({
        k: v.dtype if hasattr(v, 'dtype') else dtypes.typeclass(type(v))
        for k, v in sdfg.constants.items()
    })

    # All memlets of that node
    memlets = {}
    for edge in state_dfg.all_edges(node):
        u, uconn, v, vconn, _ = edge
        if u == node and uconn in u.out_connectors:
            defined_symbols.update({uconn: u.out_connectors[uconn]})
        elif v == node and vconn in v.in_connectors:
            defined_symbols.update({vconn: v.in_connectors[vconn]})

    body = node.code.code
    for stmt in body:
        stmt = copy.deepcopy(stmt)
        result = StringIO()
        dace.codegen.targets.sve.unparse.SVEUnparser(
            sdfg, dfg, self.current_map, self.cpu_codegen, stmt, result, body, memlets,
            util.get_loop_predicate(sdfg, dfg, node), self.counter_type, defined_symbols,
            self.stream_associations, self.wcr_associations)
        callsite_stream.write(result.getvalue(), sdfg, state_id, node)

    callsite_stream.write('///////////////////\n\n')
def testInputAST(self):
    # the inferrer's input parameter is an AST
    code_str = """var1 = int(in_x)
var2: int = in_y
var3 = 2.1 if (i>1 and i<10) else 2.1 # A comment
res = var1 + var3 * var2"""

    # create the AST and infer on it
    tree = ast.parse(code_str)
    defined_symbols = {"in_x": dtypes.typeclass(np.float32), "in_y": dtypes.typeclass(np.float32)}
    inf_symbols = type_inference.infer_types(tree, defined_symbols)
    self.assertEqual(inf_symbols["var1"], dtypes.typeclass(int))
    self.assertEqual(inf_symbols["var2"], dtypes.typeclass(int))
    self.assertEqual(inf_symbols["var3"], dtypes.typeclass(float))
    self.assertEqual(inf_symbols["res"], dtypes.typeclass(float))
def testSymbolic(self):
    # Define some sympy symbols to work with
    n = sp.Symbol('n')
    m = sp.Symbol('m')

    defined_symbols = {'n': dtypes.typeclass(np.float64)}
    inf_symbol = type_inference.infer_expr_type(n + 5, defined_symbols)
    self.assertEqual(inf_symbol, dtypes.typeclass(np.float64))

    defined_symbols = {'n': dtypes.typeclass(np.int8)}
    inf_symbol = type_inference.infer_expr_type(n * 5, defined_symbols)
    self.assertEqual(inf_symbol, dtypes.typeclass(int))

    defined_symbols = {'n': dtypes.typeclass(np.int8)}
    inf_symbol = type_inference.infer_expr_type(n * 5.0, defined_symbols)
    self.assertEqual(inf_symbol, dtypes.typeclass(int))

    defined_symbols = {'n': dtypes.typeclass(np.int8)}
    inf_symbol = type_inference.infer_expr_type(n * 5.01, defined_symbols)
    self.assertEqual(inf_symbol, dtypes.typeclass(float))

    defined_symbols = {'n': dtypes.typeclass(np.int8), 'm': dtypes.typeclass(np.float32)}
    inf_symbol = type_inference.infer_expr_type(n * m + n, defined_symbols)
    self.assertEqual(inf_symbol, dtypes.typeclass(np.float32))
def testAssignmentIf(self):
    code_str = "res = 5 if x > 10 else 3.1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["res"], dtypes.typeclass(float))
def create_datadescriptor(obj):
    """ Creates a data descriptor from various types of objects.
        @see: dace.data.Data
    """
    from dace import dtypes  # Avoiding import loops
    if isinstance(obj, Data):
        return obj
    elif hasattr(obj, '__descriptor__'):
        return obj.__descriptor__()
    elif hasattr(obj, 'descriptor'):
        return obj.descriptor
    elif isinstance(obj, (list, tuple, numpy.ndarray)):
        if isinstance(obj, (list, tuple)):  # Lists and tuples are cast to numpy
            obj = numpy.array(obj)

        if obj.dtype.fields is not None:  # Struct
            dtype = dtypes.struct('unnamed',
                                  **{
                                      k: dtypes.typeclass(v[0].type)
                                      for k, v in obj.dtype.fields.items()
                                  })
        else:
            dtype = dtypes.typeclass(obj.dtype.type)
        return Array(dtype=dtype,
                     strides=tuple(s // obj.itemsize for s in obj.strides),
                     shape=obj.shape)
    # Special case for torch tensors. Maybe __array__ could be used here for a more
    # general solution, but torch doesn't support __array__ for CUDA tensors.
    elif type(obj).__module__ == "torch" and type(obj).__name__ == "Tensor":
        try:
            import torch
            return Array(dtype=dtypes.TORCH_DTYPE_TO_TYPECLASS[obj.dtype],
                         strides=obj.stride(),
                         shape=tuple(obj.shape))
        except ImportError:
            raise ValueError(
                "Attempted to convert a torch.Tensor, but torch could not be imported")
    elif dtypes.is_gpu_array(obj):
        interface = obj.__cuda_array_interface__
        dtype = dtypes.typeclass(numpy.dtype(interface['typestr']).type)
        itemsize = numpy.dtype(interface['typestr']).itemsize
        if len(interface['shape']) == 0:
            return Scalar(dtype, storage=dtypes.StorageType.GPU_Global)
        return Array(dtype=dtype,
                     shape=interface['shape'],
                     strides=(tuple(s // itemsize for s in interface['strides'])
                              if interface['strides'] else None),
                     storage=dtypes.StorageType.GPU_Global)
    elif symbolic.issymbolic(obj):
        return Scalar(symbolic.symtype(obj))
    elif isinstance(obj, dtypes.typeclass):
        return Scalar(obj)
    elif (obj is int or obj is float or obj is complex or obj is bool or obj is None):
        return Scalar(dtypes.typeclass(obj))
    elif isinstance(obj, type) and issubclass(obj, numpy.number):
        return Scalar(dtypes.typeclass(obj))
    elif isinstance(obj, (Number, numpy.number, numpy.bool_)):
        return Scalar(dtypes.typeclass(type(obj)))
    elif callable(obj):
        # Cannot determine return value/argument types from function object
        return Scalar(dtypes.callback(None))
    elif isinstance(obj, str):
        return Scalar(dtypes.string())
    raise TypeError(f'Could not create a DaCe data descriptor from object {obj}. '
                    'If this is a custom object, consider creating a `__descriptor__` '
                    'adaptor method to the type hint or object itself.')
def testSimpleAssignment(self):
    # simple assignment tests

    # bool
    code_str = "value=True"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(bool))

    # int
    code_str = "value = 1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(int))

    # float
    code_str = "value = 1.1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(float))

    # string: should return a char*
    code_str = "value = 'hello'"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.pointer(dtypes.int8))

    # assignment with previously defined symbols
    prev_symbols = {"char_num": dtypes.typeclass(np.int8)}
    code_str = "value = char_num"
    inf_symbols = type_inference.infer_types(code_str, prev_symbols)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(np.int8))

    # aug assignment
    code_str = "value += 1.1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(float))

    # annotated assignments
    code_str = "value : int = 1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(int))

    code_str = "value : dace.int32 = 1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.int32)

    code_str = "value : numpy.float64 = 1"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.float64)

    code_str = "value : str"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.pointer(dtypes.int8))

    # type conversion
    # in this case conversion is stricter (int -> int32)
    code_str = "value = int(1.1)"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(int))

    code_str = "value = int32(1.1)"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.int32)

    code_str = "value = dace.float64(1.1)"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.float64)

    code_str = "value = float(1)"
    inf_symbols = type_inference.infer_types(code_str)
    self.assertEqual(inf_symbols["value"], dtypes.typeclass(float))
def _Num(t, symbols, inferred_symbols):
    # Combine the minimum type needed to represent this number with the corresponding
    # default data type, taking the wider of the two.
    # E.g., if num=1, it is represented using the default integer type
    # (int32 if C data types are used)
    return dtypes.result_type_of(dtypes.typeclass(type(t.n)),
                                 dtypes.typeclass(np.min_scalar_type(t.n).name))
def _NameConstant(t, symbols, inferred_symbols):
    return dtypes.result_type_of(dtypes.typeclass(type(t.value)),
                                 dtypes.typeclass(np.min_scalar_type(t.value).name))
def __init__(self):
    super().__init__(lambda: dtypes.typeclass(None))
def create_datadescriptor(obj, no_custom_desc=False):
    """ Creates a data descriptor from various types of objects.
        @see: dace.data.Data
    """
    from dace import dtypes  # Avoiding import loops
    if isinstance(obj, Data):
        return obj
    elif not no_custom_desc and hasattr(obj, '__descriptor__'):
        return obj.__descriptor__()
    elif not no_custom_desc and hasattr(obj, 'descriptor'):
        return obj.descriptor
    elif isinstance(obj, (list, tuple, numpy.ndarray)):
        if isinstance(obj, (list, tuple)):  # Lists and tuples are cast to numpy
            obj = numpy.array(obj)

        if obj.dtype.fields is not None:  # Struct
            dtype = dtypes.struct('unnamed',
                                  **{k: dtypes.typeclass(v[0].type)
                                     for k, v in obj.dtype.fields.items()})
        else:
            dtype = dtypes.typeclass(obj.dtype.type)
        return Array(dtype=dtype, strides=tuple(s // obj.itemsize for s in obj.strides), shape=obj.shape)
    # Special case for torch tensors. Maybe __array__ could be used here for a more
    # general solution, but torch doesn't support __array__ for CUDA tensors.
    elif type(obj).__module__ == "torch" and type(obj).__name__ == "Tensor":
        try:
            # If torch is importable, define translations between typeclasses and torch types.
            # These are reused by daceml. Conversion happens here in pytorch:
            # https://github.com/pytorch/pytorch/blob/143ef016ee1b6a39cf69140230d7c371de421186/torch/csrc/utils/tensor_numpy.cpp#L237
            import torch
            TYPECLASS_TO_TORCH_DTYPE = {
                dtypes.bool_: torch.bool,
                dtypes.int8: torch.int8,
                dtypes.int16: torch.int16,
                dtypes.int32: torch.int32,
                dtypes.int64: torch.int64,
                dtypes.uint8: torch.uint8,
                dtypes.float16: torch.float16,
                dtypes.float32: torch.float32,
                dtypes.float64: torch.float64,
                dtypes.complex64: torch.complex64,
                dtypes.complex128: torch.complex128,
            }

            TORCH_DTYPE_TO_TYPECLASS = {v: k for k, v in TYPECLASS_TO_TORCH_DTYPE.items()}

            return Array(dtype=TORCH_DTYPE_TO_TYPECLASS[obj.dtype], strides=obj.stride(), shape=tuple(obj.shape))
        except ImportError:
            raise ValueError("Attempted to convert a torch.Tensor, but torch could not be imported")
    elif dtypes.is_gpu_array(obj):
        interface = obj.__cuda_array_interface__
        dtype = dtypes.typeclass(numpy.dtype(interface['typestr']).type)
        itemsize = numpy.dtype(interface['typestr']).itemsize
        if len(interface['shape']) == 0:
            return Scalar(dtype, storage=dtypes.StorageType.GPU_Global)
        return Array(dtype=dtype,
                     shape=interface['shape'],
                     strides=(tuple(s // itemsize for s in interface['strides']) if interface['strides'] else None),
                     storage=dtypes.StorageType.GPU_Global)
    elif symbolic.issymbolic(obj):
        return Scalar(symbolic.symtype(obj))
    elif isinstance(obj, dtypes.typeclass):
        return Scalar(obj)
    elif (obj is int or obj is float or obj is complex or obj is bool or obj is None):
        return Scalar(dtypes.typeclass(obj))
    elif isinstance(obj, type) and issubclass(obj, numpy.number):
        return Scalar(dtypes.typeclass(obj))
    elif isinstance(obj, (Number, numpy.number, numpy.bool_)):
        return Scalar(dtypes.typeclass(type(obj)))
    elif obj is type(None):
        # NoneType is void *
        return Scalar(dtypes.pointer(dtypes.typeclass(None)))
    elif callable(obj):
        # Cannot determine return value/argument types from function object
        return Scalar(dtypes.callback(None))
    elif isinstance(obj, str):
        return Scalar(dtypes.string())
    raise TypeError(f'Could not create a DaCe data descriptor from object {obj}. '
                    'If this is a custom object, consider creating a `__descriptor__` '
                    'adaptor method to the type hint or object itself.')
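# A minimal usage sketch (illustrative, not part of the module): how the dispatch above
# classifies a few common inputs. Exact descriptor contents depend on the DaCe version
# and on the configured default data types.
#
#   create_datadescriptor(numpy.zeros((4, 3), dtype=numpy.float32))  # -> Array of float32, shape (4, 3)
#   create_datadescriptor(dace.int32)                                # -> Scalar(int32)
#   create_datadescriptor(3.14)                                      # -> Scalar of the default float type
#   create_datadescriptor("text")                                    # -> Scalar(string)
#   create_datadescriptor(object())                                  # -> raises TypeError (no __descriptor__)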
def unparse_tasklet(sdfg, state_id, dfg, node, function_stream, callsite_stream, locals, ldepth,
                    toplevel_schedule, codegen):

    if node.label is None or node.label == "":
        return ""

    state_dfg = sdfg.nodes()[state_id]

    # Not [], "" or None
    if not node.code:
        return ""

    # If raw C++ code, return the code directly
    if node.language != dtypes.Language.Python:
        # If this code runs on the host and is associated with a GPU stream,
        # set the stream to a local variable.
        max_streams = int(Config.get("compiler", "cuda", "max_concurrent_streams"))
        if (max_streams >= 0 and not is_devicelevel_gpu(sdfg, state_dfg, node)
                and hasattr(node, "_cuda_stream")):
            callsite_stream.write(
                'int __dace_current_stream_id = %d;\n%sStream_t __dace_current_stream = __state->gpu_context->streams[__dace_current_stream_id];'
                % (node._cuda_stream, Config.get('compiler', 'cuda', 'backend')),
                sdfg,
                state_id,
                node,
            )

        if node.language != dtypes.Language.CPP:
            raise ValueError(
                "Only Python or C++ code supported in CPU codegen, got: {}".format(node.language))
        callsite_stream.write(
            type(node).__properties__["code"].to_string(node.code), sdfg, state_id, node)

        if hasattr(node, "_cuda_stream") and not is_devicelevel_gpu(sdfg, state_dfg, node):
            synchronize_streams(sdfg, state_dfg, state_id, node, node, callsite_stream)
        return

    body = node.code.code

    # Map local names to memlets (for WCR detection)
    memlets = {}
    for edge in state_dfg.all_edges(node):
        u, uconn, v, vconn, memlet = edge
        if u == node:
            memlet_nc = not is_write_conflicted(dfg, edge, sdfg_schedule=toplevel_schedule)
            memlet_wcr = memlet.wcr
            if uconn in u.out_connectors:
                conntype = u.out_connectors[uconn]
            else:
                conntype = None

            memlets[uconn] = (memlet, memlet_nc, memlet_wcr, conntype)
        elif v == node:
            if vconn in v.in_connectors:
                conntype = v.in_connectors[vconn]
            else:
                conntype = None

            memlets[vconn] = (memlet, False, None, conntype)

    # To prevent variable redefinition, build a dictionary with all the previously defined symbols
    defined_symbols = state_dfg.symbols_defined_at(node)
    defined_symbols.update({
        k: v.dtype if hasattr(v, 'dtype') else dtypes.typeclass(type(v))
        for k, v in sdfg.constants.items()
    })

    for connector, (memlet, _, _, conntype) in memlets.items():
        if connector is not None:
            defined_symbols.update({connector: conntype})

    callsite_stream.write("// Tasklet code (%s)\n" % node.label, sdfg, state_id, node)
    for stmt in body:
        stmt = copy.deepcopy(stmt)
        rk = StructInitializer(sdfg).visit(stmt)
        if isinstance(stmt, ast.Expr):
            rk = DaCeKeywordRemover(sdfg, memlets, sdfg.constants, codegen).visit_TopLevelExpr(stmt)
        else:
            rk = DaCeKeywordRemover(sdfg, memlets, sdfg.constants, codegen).visit(stmt)

        if rk is not None:
            # Unparse to C++ and add 'auto' declarations if locals not declared
            result = StringIO()
            cppunparse.CPPUnparser(rk, ldepth + 1, locals, result, defined_symbols=defined_symbols)
            callsite_stream.write(result.getvalue(), sdfg, state_id, node)
def testArrayAccess(self):
    code_str = "tmp = array[i]"
    symbols = type_inference.infer_types(code_str, {"array": dtypes.typeclass(float)})
    self.assertEqual(symbols["tmp"], dtypes.typeclass(float))
def _BoolOp(t, symbols, inferred_symbols):
    for v in t.values:
        _dispatch(v, symbols, inferred_symbols)
    return dtypes.typeclass(np.bool_)