def typeCheckBroadcastOps(self, node: AST.BOp, eType: Type, fType: Type):
    """Type-check a broadcasting binary op and set node.type.

    Int op Int yields an Int; tensor/int combinations are broadcast via
    Util.getBroadcastShapes, with an Int operand treated as a scalar
    (shape []). The result's taint and secrecy combine both operands.
    Returns node.type.
    """
    # Ops which support broadcasting have different type checking.
    # If adding a new op here which supports broadcasting, be careful!
    # Currently it's assumed the op is commutative; if that is not true,
    # the following will be wrong!
    assert node.op in [
        AST.Operators.ADD,
        AST.Operators.SUB,
        AST.Operators.Equal,
        AST.Operators.ElemWiseMul,
        AST.Operators.ElemWiseDiv,
    ]
    if isInt(eType) and isInt(fType):
        resultType = Int(eType.bitlen)
    elif isTensor(eType) and isTensor(fType):
        outShape, _, _ = Util.getBroadcastShapes(eType.shape, fType.shape)
        resultType = Tensor(shape=outShape, bitlen=eType.bitlen)
    elif isTensor(eType) and isInt(fType):
        # Int operand acts as a scalar (rank-0) for broadcasting purposes.
        outShape, _, _ = Util.getBroadcastShapes(eType.shape, [])
        resultType = Tensor(shape=outShape, bitlen=eType.bitlen)
    elif isInt(eType) and isTensor(fType):
        outShape, _, _ = Util.getBroadcastShapes([], fType.shape)
        resultType = Tensor(shape=outShape, bitlen=eType.bitlen)
    else:
        print(eType, fType)
        assert False
    resultType.taint = getTaint_type(eType, fType)
    resultType.isSecret = eType.isSecret | fType.isSecret
    node.type = resultType
    return node.type
def visitBopConv(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check a 2D/3D convolution node and compute its output type.

    eType is the input image tensor type and fType the filter tensor type
    (the destructuring below fixes the layouts: [N,H,W,CI] / [FH,FW,CI1,CO]
    for 2D, with a leading D / FD dim for 3D). Supports grouped convolution
    via the `group` option. Sets and returns node.type.
    """
    assert isTensor(eType) and isTensor(fType)

    def spatialOut(inSize, padLo, padHi, filtSize, stride):
        # Standard convolution output-size formula (floor division).
        return ((inSize + padLo + padHi - filtSize) // stride) + 1

    opts = node.options
    convDim = 2
    if AST.PaddingKeysDict.ConvDim in opts:
        convDim = opts[AST.PaddingKeysDict.ConvDim]

    if convDim == 2:
        assert eType.dim == 4 and fType.dim == 4
    elif convDim == 3:
        assert eType.dim == 5 and fType.dim == 5
    else:
        assert False

    N = D = H = W = CI = FD = FH = FW = CI1 = CO = -1
    newD = -1
    if convDim == 2:
        [N, H, W, CI] = eType.shape
        [FH, FW, CI1, CO] = fType.shape
    else:
        [N, D, H, W, CI] = eType.shape
        [FD, FH, FW, CI1, CO] = fType.shape
        assert FD == opts[AST.PaddingKeysDict.FD]
        newD = spatialOut(
            D,
            opts[AST.PaddingKeysDict.zPadDLeft],
            opts[AST.PaddingKeysDict.zPadDRight],
            FD,
            opts[AST.PaddingKeysDict.strideD],
        )

    group = 1
    if AST.PaddingKeysDict.group in opts:
        group = opts[AST.PaddingKeysDict.group]

    assert FH == opts[AST.PaddingKeysDict.FH]
    assert FW == opts[AST.PaddingKeysDict.FW]
    # Grouped convolution: filter input-channel dim is CI / group.
    assert CI1 * group == CI, "FCI={} group={} CI={}".format(
        CI1, group, CI)

    newH = spatialOut(
        H,
        opts[AST.PaddingKeysDict.zPadHLeft],
        opts[AST.PaddingKeysDict.zPadHRight],
        FH,
        opts[AST.PaddingKeysDict.strideH],
    )
    newW = spatialOut(
        W,
        opts[AST.PaddingKeysDict.zPadWLeft],
        opts[AST.PaddingKeysDict.zPadWRight],
        FW,
        opts[AST.PaddingKeysDict.strideW],
    )

    if convDim == 2:
        shape = [N, newH, newW, CO]
    else:
        shape = [N, newD, newH, newW, CO]
    node.type = Tensor(
        shape,
        eType.bitlen,
        eType.isSecret | fType.isSecret,
        getTaint_type(eType, fType),
    )
    return node.type
def typeCheckBroadcastOps(self, node: AST.BOp, eType: Type, fType: Type):
    """Type-check a broadcasting binary op and set node.type.

    Ops which support broadcasting have different type checking.
    If adding a new op here which supports broadcasting, then be careful!
    When the second operand has higher rank, the operands are swapped in
    the AST -- this is only valid for commutative ops, so the swap is
    refused for ElemWiseDiv (a/b != b/a).
    Returns node.type.
    """
    assert node.op in [
        AST.Operators.ADD,
        AST.Operators.ElemWiseMul,
        AST.Operators.ElemWiseDiv,
    ]
    if len(eType.shape) < len(fType.shape):
        # Bug fix: swapping operands is only sound for commutative ops;
        # previously ElemWiseDiv was silently swapped too, which computes
        # the reciprocal of the intended division. Fail loudly instead.
        assert node.op != AST.Operators.ElemWiseDiv, \
            "Broadcast requiring operand swap is unsupported for ElemWiseDiv"
        node.expr1, node.expr2 = node.expr2, node.expr1
        eType, fType = fType, eType
    # Now true that dim(eType) >= dim(fType)
    assert len(eType.shape) >= len(fType.shape)

    if isInt(eType) and isInt(fType):
        node.type = Int(eType.bitlen, eType.isSecret)
    elif isTensor(eType) and isTensor(fType):
        # Walk trailing dims; each pair must be equal or contain a 1.
        revETypeShape = eType.shape[::-1]
        revFTypeShape = fType.shape[::-1]
        revOutShape = list(revETypeShape)
        for i, fTypeCurDim in enumerate(revFTypeShape):
            eTypeCurDim = revETypeShape[i]
            if not (eTypeCurDim == 1 or fTypeCurDim == 1
                    or eTypeCurDim == fTypeCurDim):
                # broadcast not possible - raise error
                print("Broadcast not possible for current node.",
                      eType.shape, fType.shape)
                assert False
        # The broadcast output dim is the larger of the pair.
            revOutShape[i] = max(eTypeCurDim, fTypeCurDim)
        outputShape = revOutShape[::-1]
        # Bug fix: the result type used to be eType unconditionally, which
        # is wrong when eType holds a 1 where fType is larger
        # (e.g. [3,1] vs [3,4] must broadcast to [3,4], not [3,1]).
        if outputShape == eType.shape:
            node.type = eType
        else:
            node.type = Tensor(outputShape)
    else:
        print(eType, fType)
        assert False
    return node.type
def visitBopMul(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check multiplication: int*int, scalar*tensor, or 2D matmul.

    Sets and returns node.type.
    """
    if isInt(eType) and isInt(fType):
        node.type = Int(eType.bitlen, eType.isSecret)
        return node.type

    if not (isTensor(eType) and isTensor(fType)):
        print("Error: Unknown condition in type checking.", file=sys.stderr)
        assert (False)

    # A 0-dim tensor acts as a scalar: the result takes the other shape.
    if eType.dim == 0:
        node.type = fType
    elif fType.dim == 0:
        node.type = eType
    else:
        # Otherwise only 2D matrix multiplication is supported.
        assert eType.dim == 2 and fType.dim == 2
        rows, innerLeft = eType.shape
        innerRight, cols = fType.shape
        assert innerLeft == innerRight
        node.type = Tensor([rows, cols])
    return node.type
def visitBopConv(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check a 2D/3D convolution and set node.type to its output type.

    eType is the input image tensor type, fType the filter tensor type
    (destructured as [N,H,W,CI] / [FH,FW,CI1,CO] for 2D, with a leading
    D / FD dim for 3D). Supports grouped convolution via the `group`
    option. Returns node.type.
    """
    assert isTensor(eType) and isTensor(fType)

    opts = node.options
    convDim = 2
    if AST.PaddingKeysDict.ConvDim in opts:
        convDim = opts[AST.PaddingKeysDict.ConvDim]

    # Map conv dimensionality to the required tensor rank.
    expectedRank = {2: 4, 3: 5}
    assert convDim in expectedRank
    assert eType.dim == expectedRank[convDim]
    assert fType.dim == expectedRank[convDim]

    N = D = H = W = CI = FD = FH = FW = CI1 = CO = -1
    newD = -1
    if convDim == 2:
        [N, H, W, CI] = eType.shape
        [FH, FW, CI1, CO] = fType.shape
    else:
        [N, D, H, W, CI] = eType.shape
        [FD, FH, FW, CI1, CO] = fType.shape
        assert (FD == opts[AST.PaddingKeysDict.FD])
        padDLo = opts[AST.PaddingKeysDict.zPadDLeft]
        padDHi = opts[AST.PaddingKeysDict.zPadDRight]
        stepD = opts[AST.PaddingKeysDict.strideD]
        # Standard conv output-size formula (floor division).
        newD = ((D + padDLo + padDHi - FD) // stepD) + 1

    group = 1
    if AST.PaddingKeysDict.group in opts:
        group = opts[AST.PaddingKeysDict.group]

    assert (FH == opts[AST.PaddingKeysDict.FH])
    assert (FW == opts[AST.PaddingKeysDict.FW])
    # Grouped convolution: filter input-channel dim is CI / group.
    assert (CI1 * group == CI)

    padHLo = opts[AST.PaddingKeysDict.zPadHLeft]
    padHHi = opts[AST.PaddingKeysDict.zPadHRight]
    padWLo = opts[AST.PaddingKeysDict.zPadWLeft]
    padWHi = opts[AST.PaddingKeysDict.zPadWRight]
    stepH = opts[AST.PaddingKeysDict.strideH]
    stepW = opts[AST.PaddingKeysDict.strideW]
    newH = ((H + padHLo + padHHi - FH) // stepH) + 1
    newW = ((W + padWLo + padWHi - FW) // stepW) + 1

    if convDim == 2:
        shape = [N, newH, newW, CO]
    else:
        shape = [N, newD, newH, newW, CO]
    node.type = Tensor(shape)
    return node.type
def visitBopAddLike(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check add-like binary ops: both operands must be ints, or
    tensors of identical shape. The result type is the left operand's.

    Sets and returns node.type.
    """
    bothInts = isInt(eType) and isInt(fType)
    bothTensors = isTensor(eType) and isTensor(fType)
    # Mixed or unknown operand kinds are rejected outright.
    assert bothInts or bothTensors
    if bothTensors:
        # No broadcasting here: shapes must match exactly.
        assert eType.shape == fType.shape
    node.type = eType
    return node.type
def visitBopDiv(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check division: int/int, scalar-tensor combinations, or a
    2D matmul-shaped form. The result's taint and secrecy combine both
    operands. Sets and returns node.type.
    """
    if isInt(eType) and isInt(fType):
        resultType = Int(eType.bitlen, eType.isSecret)
    elif isTensor(eType) and isTensor(fType):
        if eType.dim == 0:
            # 0-dim operand acts as a scalar; copy so the taint/secret
            # mutation below does not clobber the operand's own type.
            resultType = copy.copy(fType)
        elif fType.dim == 0:
            resultType = copy.copy(eType)
        else:
            assert eType.dim == 2 and fType.dim == 2
            rows, innerLeft = eType.shape
            innerRight, cols = fType.shape
            assert innerLeft == innerRight
            resultType = Tensor([rows, cols], eType.bitlen)
    else:
        print("Error: Unknown condition in type checking.", file=sys.stderr)
        assert False
    resultType.taint = getTaint_type(eType, fType)
    resultType.isSecret = eType.isSecret | fType.isSecret
    node.type = resultType
    return node.type
def visitBopConvTranspose(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check a 2D/3D transposed convolution and set node.type.

    eType is the input tensor type ([N,HP,WP,CI1] for 2D), fType the
    filter type ([FH,FW,CO,CI] for 2D); 3D adds a leading depth dim.
    The output spatial size is taken from the node's outputImg* options
    (TF mandates the operator be specified with output size).
    Returns node.type.
    """
    assert isTensor(eType) and isTensor(fType)
    convDim = 2
    if AST.PaddingKeysDict.ConvDim in node.options:
        convDim = node.options[AST.PaddingKeysDict.ConvDim]

    if convDim == 2:
        [N, HP, WP, CI1] = eType.shape
        [FH, FW, CO, CI] = fType.shape
    elif convDim == 3:
        [N, DP, HP, WP, CI1] = eType.shape
        [FD, FH, FW, CO, CI] = fType.shape
    else:
        assert False
    assert CI1 == CI

    # Bug fix: outputImgH/outputImgW are required for BOTH 2D and 3D, but
    # were previously read only inside the convDim == 3 branch, raising
    # NameError for every 2D conv-transpose.
    outputImgH = node.options[AST.PaddingKeysDict.outputImgH]
    outputImgW = node.options[AST.PaddingKeysDict.outputImgW]
    if convDim == 3:
        outputImgD = node.options[AST.PaddingKeysDict.outputImgD]

    if convDim == 2:
        shape = [N, outputImgH, outputImgW, CO]
    else:
        shape = [N, outputImgD, outputImgH, outputImgW, CO]

    # Logic explanation:
    # ConvTranspose can be thought of as the inverse of some convolution
    # for which it is doing the upsampling. For calculation of padding in
    # the convTranspose operation, the output image size is required. This
    # is why TF also mandates the operator to be specified with output
    # size. This conv transpose operation can be thought of as conv
    # between output of size shape = [N, outputImgH, outputImgW, CI], and
    # filter of size [FH, FW, CI, CO]. Hence, the input for this
    # convTranspose would be [N, HP, WP, CO].
    node.type = Tensor(
        shape,
        eType.bitlen,
        eType.isSecret | fType.isSecret,
        getTaint_type(eType, fType),
    )
    return node.type
def visitBopConv(self, node: AST.BOp, eType: Type, fType: Type, args=None):
    """Type-check a 2D convolution ([N,H,W,CI] input, [FH,FW,CI1,CO]
    filter) and set node.type to the output tensor type.

    Returns node.type.
    """
    assert isTensor(eType) and isTensor(fType)
    assert eType.dim == 4 and fType.dim == 4

    batch, inH, inW, inC = eType.shape
    filtH, filtW, filtInC, outC = fType.shape

    opts = node.options
    # Filter dims recorded in the node's options must match fType.
    assert (filtH == opts[AST.PaddingKeysDict.FH])
    assert (filtW == opts[AST.PaddingKeysDict.FW])
    assert (filtInC == inC)

    padHLo = opts[AST.PaddingKeysDict.zPadHLeft]
    padHHi = opts[AST.PaddingKeysDict.zPadHRight]
    padWLo = opts[AST.PaddingKeysDict.zPadWLeft]
    padWHi = opts[AST.PaddingKeysDict.zPadWRight]
    stepH = opts[AST.PaddingKeysDict.strideH]
    stepW = opts[AST.PaddingKeysDict.strideW]

    # Standard conv output-size formula (floor division).
    outH = ((inH + padHLo + padHHi - filtH) // stepH) + 1
    outW = ((inW + padWLo + padWHi - filtW) // stepW) + 1

    node.type = Tensor([batch, outH, outW, outC])
    return node.type