def TruncatedNormal(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    curNodeDataType = curNode.getAttrMapRef()["\"dtype\""].getDataType()
    assert (curNodeDataType is not Graph.DataTypeEnum.DT_INVALID)
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)
    curNodeOutputShape = extraNodeInfoDict[curNode.getName()][0]
    # TODO
    return (None, AST.UninterpFuncCall(curNodeOutputShape,
                                       TFNodesAST.UninterpFuncCallNames.TruncatedNormal.name,
                                       [AST.ID(curNodeDataType.name)]
                                       + list(map(lambda x: AST.Int(x), curNodeOutputShape))))
def Reshape(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    return (None, AST.Reshape(AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                              extraNodeInfoDict[curNode.getName()][0],
                              None))
def ArgMax(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    return (None, AST.ArgMax(extraNodeInfoDict[curNode.getName()][0],
                             AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                             AST.ID(dictNodeNameToOutVarStr[inputsRef[1]]),
                             extraNodeInfoDict[inputsRef[0]][0]))
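# For reference (an assumption about the operands, mirroring tf.argmax): inputsRef[0]
# is the input tensor and inputsRef[1] is the scalar axis along which the argmax is
# taken; extraNodeInfoDict[inputsRef[0]][0] passes the input's shape along as well.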
def ExpandDims(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.ExpandDims.name,
                                  list(map(lambda x: AST.ID(dictNodeNameToOutVarStr[x]), inputsRef)))
    return (None, retAST)
def Slice(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 3)
    curNodeDataType = curNode.getAttrMapRef()["\"T\""].getDataType()
    curNodeShapeASTLi = list(map(lambda x: AST.Int(x), extraNodeInfoDict[curNode.getName()][0]))
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.CreateCopy.name,
                                  [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),  # tensor to slice from
                                   AST.ID(dictNodeNameToOutVarStr[inputsRef[1]]),  # begin indices
                                   AST.ID(dictNodeNameToOutVarStr[inputsRef[2]])   # sizes
                                   ])
    return (None, retAST)
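# A minimal sketch (illustrative assumption, not used by the converter) of the
# tf.slice semantics the CreateCopy call above is expected to realize downstream:
# out[i] = input[begin + i] for 0 <= i < size, applied per dimension.
def _sliceSketch1D(inputLi: list, begin: int, size: int) -> list:
    # 1-D case only: copy `size` elements starting at index `begin`.
    return inputLi[begin:begin + size]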
def Placeholder(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    # curNodeShapeLi = curNode.getAttrMapRef()["\"shape\""].getShape().getDimRef()
    curNodeShapeLi = extraNodeInfoDict[curNode.getName()][0]
    curNodeInputType = curNode.getAttrMapRef()["\"dtype\""].getDataType()
    assert (curNodeInputType is not Graph.DataTypeEnum.DT_INVALID)
    # TODO : There has to be some way to take the value range and understand the dimensions for SeeDot.
    # CHANGESRI
    # return (None, AST.Input(curNodeShapeLi, curNodeInputType.name))
    return (None, AST.Decl(curNodeShapeLi, (-0.1, 0.1)))
def MaxPoolGrad(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.MaxPoolGrad.name,
                                       list(map(lambda x: AST.ID(dictNodeNameToOutVarStr[x]), inputsRef))))
def Tile(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.Tile.name,
                                       [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                                        AST.ID(dictNodeNameToOutVarStr[inputsRef[1]])
                                        ]))
def ZerosLike(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)
    curNodeOutputType = curNode.getAttrMapRef()["\"T\""].getDataType()
    assert (curNodeOutputType is not Graph.DataTypeEnum.DT_INVALID)
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.CreateTensor.name,
                                  [AST.Int(0)],
                                  isSecret=False)
    return (None, retAST)
def Pack(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    N = curNode.getAttrMapRef()["\"N\""].getI()
    axis = curNode.getAttrMapRef()["\"axis\""].getI()
    assert (len(inputsRef) == N)
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.Pack.name,
                                  list(map(lambda x: AST.ID(dictNodeNameToOutVarStr[x]), inputsRef))
                                  + [AST.Int(axis)])
    return (None, retAST)
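# For reference (an assumption about the downstream semantics, mirroring tf.stack):
# Pack joins N rank-R tensors into one rank-(R+1) tensor along `axis`; e.g. packing
# two [2,3] tensors with axis=0 yields a [2,2,3] tensor.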
def RandomUniform(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    curNodeDataType = curNode.getAttrMapRef()["\"dtype\""].getDataType()
    assert (curNodeDataType is not Graph.DataTypeEnum.DT_INVALID)
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)
    curNodeOutputShape = extraNodeInfoDict[curNode.getName()][0]
    return (None, AST.UninterpFuncCall(curNodeOutputShape,
                                       TFNodesAST.UninterpFuncCallNames.RandomUniform.name,
                                       [AST.ID(curNodeDataType.name)]))
def Cast(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)
    sourceType = curNode.getAttrMapRef()["\"SrcT\""].getDataType()
    destType = curNode.getAttrMapRef()["\"DstT\""].getDataType()
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.Cast.name,
                                       [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                                        AST.ID(sourceType.name),
                                        AST.ID(destType.name)
                                        ]))
def ConcatV2(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    N = curNode.getAttrMapRef()["\"N\""].getI()
    assert (len(inputsRef) == N + 1)  # One extra input for the axis
    # TODO : Since the concat axis is a constant, it is known here; the inputs' sizes
    # along that dimension should be passed as input to the function below.
    # For now hardcoding.
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.Concat.name + str(N) + 'T',
                                  list(map(lambda x: AST.ID(dictNodeNameToOutVarStr[x]), inputsRef)),
                                  outputDiffInpDims=1)
    return (None, retAST)
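# Note on the call name built above: N is baked into it, so concatenating two
# tensors emits "Concat2T", three emits "Concat3T", and so on; the backend is
# presumably expected to provide one such uninterpreted function per arity.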
def Assign(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    curNodeShape = extraNodeInfoDict[curNode.getName()][0]
    # TODO_TAB : for inference, have commented out the copyTensor function calls.
    # TODO : Hack -- fix this later after discussing with Aseem.
    # return (None, AST.UninterpFuncCall(curNodeShape,
    #                                    TFNodesAST.UninterpFuncCallNames.CopyTensor.name,
    #                                    [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
    #                                     AST.ID(dictNodeNameToOutVarStr[inputsRef[1]])]))
    return (None, None)
def Identity(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    # In SeeDot, J2 = J1 just creates a new reference for J1, so the corresponding
    # SeeDot code cannot simply be J2 = J1. Instead, create a new tensor first and
    # then assign the old one to it.
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)
    curNodeDataType = curNode.getAttrMapRef()["\"T\""].getDataType()
    assert (curNodeDataType is not Graph.DataTypeEnum.DT_INVALID)
    curNodeShape = extraNodeInfoDict[curNode.getName()][0]
    retAST = AST.UninterpFuncCall(curNodeShape,
                                  TFNodesAST.UninterpFuncCallNames.CreateIdentity.name,
                                  [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]])])
    return (None, retAST)
def Squeeze(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    # TODO : Do this in a somewhat better way.
    inputsRef = curNode.getInputsRef()
    inputTensorShape = extraNodeInfoDict[inputsRef[0]][0]
    inputTensorRank = len(inputTensorShape)
    squeezeDims = curNode.getAttrMapRef()["\"squeeze_dims\""].getList().getILi()
    squeezeDimsRank = len(squeezeDims)
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.Squeeze.name,
                                       list(map(lambda x: AST.Int(x, 32), squeezeDims))
                                       + [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]])]))
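# For reference (mirroring tf.squeeze): squeeze_dims lists the size-1 dimensions
# to drop, e.g. squeeze_dims=[1] turns a [5,1,3] tensor into a [5,3] one.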
def Fill(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 2)
    curNodeOutputShape = extraNodeInfoDict[inputsRef[0]][0]
    # inputsRef[0] denotes a shape and should have a rank of 1
    assert (len(curNodeOutputShape) == 1)
    curNodeOutputType = curNode.getAttrMapRef()["\"T\""].getDataType()
    assert (curNodeOutputType is not Graph.DataTypeEnum.DT_INVALID)
    retAST = AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                  TFNodesAST.UninterpFuncCallNames.CreateTensor.name,
                                  [AST.ID(dictNodeNameToOutVarStr[inputsRef[1]])],
                                  isSecret=False)
    return (None, retAST)
def AvgPool(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    inputsRef = curNode.getInputsRef()
    assert (len(inputsRef) == 1)

    options = {}

    stridesUsed = curNode.getAttrMapRef()["\"strides\""].getList().getILi()
    assert ((stridesUsed[0] == 1) and (stridesUsed[3] == 1))
    strideH = stridesUsed[1]
    strideW = stridesUsed[2]

    kSizeUsed = curNode.getAttrMapRef()["\"ksize\""].getList().getILi()
    assert ((kSizeUsed[0] == 1) and (kSizeUsed[3] == 1))
    kSizeH = kSizeUsed[1]
    kSizeW = kSizeUsed[2]

    paddingUsedStr = curNode.getAttrMapRef()["\"padding\""].getS()
    zPadH = zPadW = -1
    if (paddingUsedStr == "\"SAME\""):
        zPadH = int((kSizeH - 1) / 2)
        zPadW = int((kSizeW - 1) / 2)
    elif (paddingUsedStr == "\"VALID\""):
        zPadH = zPadW = 0
    else:
        zPadH = zPadW = -1

    inputShape = extraNodeInfoDict[inputsRef[0]][0]
    imgH = inputShape[1]
    imgW = inputShape[2]

    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.AvgPool.name,
                                       [AST.Int(kSizeH, 32), AST.Int(kSizeW, 32),
                                        AST.Int(zPadH, 32), AST.Int(zPadW, 32),
                                        AST.Int(strideH, 32), AST.Int(strideW, 32),
                                        AST.ID(dictNodeNameToOutVarStr[inputsRef[0]])
                                        ]))
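# A minimal sketch (illustrative assumption, not called by the converter) of the
# symmetric "SAME" padding computed above: the total pad kSize - 1 is halved, so
# a 3x3 window gives zPad = 1 per side and a 2x2 window gives zPad = 0.
def _samePadSketch(kSize: int) -> int:
    # Matches int((kSize - 1) / 2) in AvgPool above for non-negative kSize.
    return (kSize - 1) // 2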
def FusedBatchNorm(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    # NOTE : Since the weights to this layer will be scaled appropriately, this op will become identity.
    inputsRef = curNode.getInputsRef()
    # TODO : The below is the right way of implementing the operator.
    # For now using an uninterpreted function call.
    # tempAst = AST.BOp(AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
    #                   TFNodesAST.getOperatorsIdx('*'),
    #                   AST.ID(dictNodeNameToOutVarStr[inputsRef[1]]))
    # return (None, AST.BOp(tempAst,
    #                       TFNodesAST.getOperatorsIdx('+'),
    #                       AST.ID(dictNodeNameToOutVarStr[inputsRef[2]])))
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.TempFusedBatchNorm.name,
                                       [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                                        AST.ID(dictNodeNameToOutVarStr[inputsRef[1]]),
                                        AST.ID(dictNodeNameToOutVarStr[inputsRef[2]]),
                                        ]))
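# For reference, the standard batch-norm algebra behind the identity/scaling note
# above (an assumption about how the folding is done upstream):
#   y = gamma * (x - mean) / sqrt(var + eps) + beta  =  A * x + B,
# with A = gamma / sqrt(var + eps) and B = beta - A * mean, so pre-scaled weights
# reduce this op to the multiply-add in the commented-out BOp version.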
def Pad(graph: Graph.Graph, curNode: Graph.Node, dictNodeNameToOutVarStr: dict, extraNodeInfoDict: dict):
    # Mode refers to 'CONSTANT', 'REFLECT' or 'SYMMETRIC'.
    mode = 0
    if ("\"mode\"" in curNode.getAttrMapRef()):
        mode = curNode.getAttrMapRef()["\"mode\""].getI()

    constant_values = 0
    if ("\"constant_values\"" in curNode.getAttrMapRef()):
        constant_values = curNode.getAttrMapRef()["\"constant_values\""].getI()

    # For now only CONSTANT mode with zero padding is supported; deal with
    # SYMMETRIC and REFLECT when the need arises.
    assert (mode == 0 and constant_values == 0)

    inputsRef = curNode.getInputsRef()
    inputTensorShapeLi = extraNodeInfoDict[inputsRef[0]][0]
    return (None, AST.UninterpFuncCall(extraNodeInfoDict[curNode.getName()][0],
                                       TFNodesAST.UninterpFuncCallNames.Pad.name,
                                       [AST.ID(dictNodeNameToOutVarStr[inputsRef[0]]),
                                        AST.ID(dictNodeNameToOutVarStr[inputsRef[1]])
                                        ],
                                       outputDiffInpDims=1))
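# For reference (mirroring tf.pad): inputsRef[1] is the paddings tensor of shape
# [rank(input), 2], where paddings[d] = [before, after] gives the number of zeros
# added on either side of dimension d.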