def visitDecl(self, ctx: SeeDotParser.DeclContext):
    """Parse a SeeDot declaration into an AST.Decl node.

    A declaration consists of a shape (a list of integer constants) and a
    value range given by two float constants.

    :param ctx: parser context for the declaration rule.
    :return: AST.Decl(shape, (lo, hi)).
    """
    # Collect every integer constant of the shape list, e.g. "(10, 5)" -> [10, 5].
    # (Loop variable renamed: the original reused the accessor name `IntConst`.)
    shape = [int(tok.getText()) for tok in ctx.intConstList().IntConst()]
    # The two float constants form the (lo, hi) value range.
    # Renamed from `range` to avoid shadowing the builtin.
    value_range = (float(ctx.FloatConst(0).getText()),
                   float(ctx.FloatConst(1).getText()))
    return AST.Decl(shape, value_range)
def Placeholder(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    """Translate a TF Placeholder node into a SeeDot declaration.

    The shape is taken from the preprocessed per-node info (keyed by node
    name), not from the node's own "shape" attribute; the "dtype"
    attribute is only sanity-checked.  Returns (None, AST.Decl(...)) with
    a hard-coded placeholder range of (-0.1, 0.1).
    """
    # Shape recorded during graph preprocessing for this node.
    # (The node's own "\"shape\"" attribute is deliberately not used here.)
    shapeList = extraNodeInfoDict[curNode.getName()][0]
    dtype = curNode.getAttrMapRef()["\"dtype\""].getDataType()
    assert dtype is not Graph.DataTypeEnum.DT_INVALID
    # TODO: derive a real value range for SeeDot from the graph instead of
    # the hard-coded (-0.1, 0.1).  (An AST.Input-based variant existed
    # previously and was replaced by this Decl form.)
    return (None, AST.Decl(shapeList, (-0.1, 0.1)))
def VariableV2(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    """Translate a TF VariableV2 node into a SeeDot declaration.

    The shape comes from the node's "shape" attribute (copied so later
    mutation cannot alter the graph); the "dtype" attribute is read but
    currently ignored by SeeDot.  Returns (None, AST.Decl(...)).
    """
    # Slice to get a private copy of the dim list.
    curNodeShapeLi = curNode.getAttrMapRef()["\"shape\""].getShape(
    ).getDimRef()[:]
    curNodeInputType = curNode.getAttrMapRef()["\"dtype\""].getDataType()
    # TODO: the dataType is currently ignored by SeeDot — fix later.
    # NOTE: for inference this used to become an AST.Input node (prefixed
    # at the top in ProcessTFGraph::prefixAllPlaceHolderNodes()); it was
    # later changed to an AST.Decl.
    # BUGFIX(review): the range used to be [0.1, 0.1] — a degenerate
    # (min == max) interval, almost certainly a typo.  Use (-0.1, 0.1)
    # to match the placeholder range used by Placeholder() — confirm
    # against SeeDot's range handling.
    return (None, AST.Decl(curNodeShapeLi, (-0.1, 0.1)))
def Const(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    """Translate a TF Const node into a SeeDot AST fragment.

    Two encodings are handled:
    * a single constant value — emitted as a scalar literal (rank 0) or as
      an uninterpreted CreateTensor call that fills the tensor with it;
    * a byte-array payload — unpacked into a list of literals and wrapped
      in an AST.Decl.
    Only DT_INT32 and DT_FLOAT payloads are supported.
    Returns (None, <ast node>).
    """
    assert len(curNode.getInputsRef()) == 0  # Const nodes take no inputs
    attrMap = curNode.getAttrMapRef()
    tensor = attrMap["\"value\""].getTensor()
    dtype = attrMap["\"dtype\""].getDataType()
    # Private copy of the shape so the original attribute is not mutated.
    shape = tensor.getShapeRef()[:]
    constVal = tensor.getConstantVal()
    if constVal is not None:
        # One repeated value: wrap it in the matching literal type.
        if dtype == Graph.DataTypeEnum.DT_INT32:
            literal = AST.Int(constVal, 32)
        elif dtype == Graph.DataTypeEnum.DT_FLOAT:
            literal = AST.Float(constVal)
        else:
            assert False
        if len(shape) == 0:
            # Rank-0 tensor: the scalar literal itself is the result.
            retAST = literal
        else:
            # Fill an entire tensor with the constant via the
            # uninterpreted CreateTensor call.
            retAST = AST.UninterpFuncCall(
                shape,
                TFNodesAST.UninterpFuncCallNames.CreateTensor.name,
                [literal],
                isSecret=False)
    else:
        # Payload given as a byte array: extract the value list and wrap
        # each element in the matching literal type.
        values = tensor.getContentAsValArr()[:]
        if dtype == Graph.DataTypeEnum.DT_INT32:
            elements = [AST.Int(v, 32) for v in values]
        elif dtype == Graph.DataTypeEnum.DT_FLOAT:
            elements = [AST.Float(v) for v in values]
        else:
            assert False
        retAST = AST.Decl(shape, None, None, elements, isSecret=False)
    return (None, retAST)
def Input(node, value_info, node_name_to_out_var_dict, init_val=None):
    """Translate an ONNX graph input / initializer into an AST.Decl.

    ``node`` comes in two flavours: an initializer-style proto exposing
    ``dims`` and ``data_type`` directly, or a graph-input ValueInfoProto
    whose shape and element type live under ``type.tensor_type``.

    :param init_val: optional initializer proto; when given, the declared
        value range is tightened to the actual [min, max] of its values,
        otherwise a default range of (-3, 3) is used.
    :return: AST.Decl(dims, (lo, hi)).
    """
    if (DEBUG):
        print(node.outputs[0])
    # Two types of inputs: initializers carry dims/data_type directly,
    # graph inputs nest them inside type.tensor_type.
    dims = list(node.dims if hasattr(node, 'dims') else (
        [val.dim_value for val in node.type.tensor_type.shape.dim]))
    # Currently unused by the Decl below, but kept for the (disabled)
    # AST.Input alternative: return AST.Input(dims, onnx2seedot(data_type))
    data_type = node.data_type if hasattr(
        node, 'data_type') else node.type.tensor_type.elem_type
    from onnx import numpy_helper
    # Default range when no concrete values are available.
    # Renamed from `range` to avoid shadowing the builtin.
    value_range = (-3, 3)
    if init_val is not None:
        arr = numpy_helper.to_array(init_val)
        value_range = (np.min(arr), np.max(arr))
    return AST.Decl(dims, value_range)