def propagate_batch_normalization(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, scale, bias, mean, variance = op.inputs
    return ([infer.copy(input.shape)]
            + [[0] if output.shape == [0] else infer.copy(mean.shape)
               for output in op.outputs[1:]],
            [input.dtype] * len(op.outputs))

def propagate_fused_batch_norm(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    format = (infer.Format.NCHW
              if "data_format" in op.attribs and op.attribs["data_format"][1].upper() == "C"
              else infer.Format.NHWC)
    input_shape = op.inputs[0].shape
    channel_shape = [input_shape[infer.channel_axis(format)]]
    return ([infer.copy(input_shape),
             infer.copy(channel_shape),
             infer.copy(channel_shape),
             infer.copy(channel_shape),
             infer.copy(channel_shape)],
            [op.attribs['T']] * 5)

def propagate_pool(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    filter_size = op.attribs['kernel_shape']
    stride = op.attribs.get('strides', [1] * len(filter_size))
    dilation = [1] * len(filter_size)
    padding = get_concrete_padding(auto_padding=op.attribs.get('auto_pad'),
                                   custom_padding=op.attribs.get('pads'),
                                   upscaled_shape=op.input.shape[2:],
                                   filter_shape=filter_size,
                                   stride=stride,
                                   dilation=dilation)

    output_shape = infer.sliding_window(input=op.input.shape,
                                        filter=[1, 1] + filter_size,
                                        padding=[(0, 0), (0, 0)] + padding,
                                        stride=[1, 1] + stride,
                                        dilation=[1, 1] + dilation)

    if len(op.outputs) == 1:
        return [output_shape], [op.input.dtype]
    elif len(op.outputs) == 2:  # for max pool
        return [output_shape, infer.copy(output_shape)], [op.input.dtype, 'INT64']
    else:
        assert False, 'Pooling only supported with 1 or 2 outputs'

def propagate_conv_transpose(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, filter = op.inputs[:2]

    if 'output_shape' in op.attribs:
        return [infer.copy(op.attribs['output_shape'])], [input.dtype]

    filter_size = filter.shape[2:]
    stride = op.attribs.get('strides', [1] * len(filter_size))
    dilation = op.attribs.get('dilations', [1] * len(filter_size))
    padding = get_concrete_padding(auto_padding=op.attribs.get('auto_pad'),
                                   custom_padding=op.attribs.get('pads'),
                                   upscaled_shape=input.shape[2:],
                                   filter_shape=filter_size,
                                   stride=stride,
                                   dilation=dilation)
    groups = op.attribs.get('group', 1)
    output_padding = op.attribs.get('output_padding', [0] * len(filter_size))

    return [infer.conv(input=input.shape,
                       filter=filter_size,
                       padding=padding,
                       stride=stride,
                       dilation=dilation,
                       groups=groups,
                       output_channels=filter.shape[1] * groups,
                       format=infer.Format.NCHW,
                       output_padding=list(zip([0] * len(filter_size), output_padding)),
                       deconv=True)], [input.dtype]

def propagate_cast(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    # Type names indexed by the integer 'to' attribute (ONNX TensorProto.DataType order).
    types = ['UNDEFINED', 'FLOAT', 'UINT8', 'INT8', 'UINT16', 'INT16', 'INT32', 'INT64',
             'STRING', 'BOOL', 'FLOAT16', 'DOUBLE', 'UINT32', 'UINT64',
             'COMPLEX64', 'COMPLEX128', 'BFLOAT16']  # TODO move out maybe

    return [infer.copy(op.input.shape)], [op.attribs['to'] if isinstance(op.attribs['to'], str)
                                          else types[op.attribs['to']]]

def propagate_max_pool_with_argmax(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    output_shape = infer.sliding_window(
        input=op.input.shape,
        filter=op.attribs['ksize'],
        padding=(infer.Padding.SAME_UPPER if op.attribs['padding'].upper() == 'SAME'
                 else infer.Padding.VALID),
        stride=op.attribs['strides'],
        dilation=[1] * len(op.attribs['strides']),
    )
    return [output_shape, infer.copy(output_shape)], [op.attribs['T'], op.attribs['Targmax']]

def propagate_max_unpool(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, index = op.inputs[:2]
    output_shape = (evaluate_shape_tensor_simple(op.inputs[2])
                    if len(op.inputs) >= 3 and not op.inputs[2].is_null else None)

    if output_shape is not None:
        return [infer.copy(output_shape)], [input.dtype]

    filter_size = op.attribs['kernel_shape']
    stride = op.attribs.get('strides', [1] * len(filter_size))
    dilation = [1] * len(filter_size)
    padding = to_nnef_padding(op.attribs.get('pads', [0] * 2 * len(filter_size)))

    output_shape = infer.sliding_window(input=input.shape,
                                        filter=[1, 1] + filter_size,
                                        padding=[(0, 0), (0, 0)] + padding,
                                        stride=[1, 1] + stride,
                                        dilation=[1, 1] + dilation,
                                        upscale=True)
    return [output_shape], [input.dtype]

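# Hedged sketch (not part of the original sources): the pooling and unpooling
# propagators above delegate the per-axis arithmetic to infer.sliding_window().
# The helper below illustrates the standard formula such a routine is assumed to
# implement; its name and exact padding convention are illustrative assumptions,
# not the library's actual code.
def _sliding_window_extent_sketch(input_size, filter_size, pad_before, pad_after,
                                  stride, dilation, upscale=False):
    # Effective filter extent once dilation is applied.
    dilated_filter = (filter_size - 1) * dilation + 1
    if not upscale:
        # Downscaling direction (conv / pool): count the filter positions that fit.
        return (input_size + pad_before + pad_after - dilated_filter) // stride + 1
    # Upscaling direction (deconv / max-unpool): invert the relation above.
    return (input_size - 1) * stride + dilated_filter - pad_before - pad_after

# Example: a 7-wide axis with a 3-wide kernel, padding (1, 1), stride 2 and
# dilation 1 yields an output extent of 4; the same numbers with upscale=True
# map a 4-wide axis back to 7.
assert _sliding_window_extent_sketch(7, 3, 1, 1, 2, 1) == 4
assert _sliding_window_extent_sketch(4, 3, 1, 1, 2, 1, upscale=True) == 7
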
def propagate_first(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    return [infer.copy(op.inputs[0].shape)], [get_op_t(op)]

def test_copy(self):
    a = [1, 2, 3]
    b = infer.copy(a)
    self.assertEqual(a, b)
    self.assertIsNot(a, b)

def propagate_dropout(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    return ([infer.copy(op.inputs[0].shape) for _ in op.outputs],
            [op.inputs[0].dtype for _ in op.outputs])

def propagate_first(op, dtype=None):
    # type: (ONNXOperation, typing.Optional[str])->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    return [infer.copy(op.inputs[0].shape)], [op.inputs[0].dtype if dtype is None else dtype]

def propagate_cast(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    return [infer.copy(op.inputs[0].shape)], [op.attribs['DstT']]

def forward_shape(op):
    return shapes.copy(op.inputs[0].shape)