def propagate_pool(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    """Infer output shapes and dtypes for an ONNX pooling operation.

    A second output (the INT64 index tensor of max pool) is propagated when present.
    """
    kernel = op.attribs['kernel_shape']
    spatial = len(kernel)
    strides = op.attribs.get('strides', [1] * spatial)
    dilations = [1] * spatial

    # Resolve auto_pad/pads into explicit per-axis (before, after) pairs.
    pads = get_concrete_padding(auto_padding=op.attribs.get('auto_pad'),
                                custom_padding=op.attribs.get('pads'),
                                upscaled_shape=op.input.shape[2:],
                                filter_shape=kernel,
                                stride=strides,
                                dilation=dilations)

    # Prepend trivial N and C entries so the sliding-window formula covers all axes.
    shape = infer.sliding_window(input=op.input.shape,
                                 filter=[1, 1] + kernel,
                                 padding=[(0, 0), (0, 0)] + pads,
                                 stride=[1, 1] + strides,
                                 dilation=[1, 1] + dilations)

    n_outputs = len(op.outputs)
    if n_outputs == 1:
        return [shape], [op.input.dtype]
    if n_outputs == 2:  # max pool can also emit an index tensor
        return [shape, infer.copy(shape)], [op.input.dtype, 'INT64']
    assert False, 'Pooling only supported with 1 or 2 outputs'
def unify_debox(op):
    # type: (NNEFOperation)->None
    """Fill in missing stride/dilation/padding/output_shape attribs of a debox op in place."""
    inp = op.inputs[0]

    # Default stride and dilation to 1 along every axis.
    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * inp.rank
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * inp.rank

    if op.attribs['padding']:
        # Explicit padding: invert the sliding-window shape formula (upscale=True).
        calculated_output_shape = infer.sliding_window(input=inp.shape,
                                                       filter=op.attribs['size'],
                                                       padding=op.attribs['padding'],
                                                       stride=op.attribs['stride'],
                                                       dilation=op.attribs['dilation'],
                                                       upscale=True)
    else:
        # No padding given: assume SAME-style padding for the upscaled shape.
        calculated_output_shape = [dim * s
                                   for dim, s in zip(inp.shape, op.attribs['stride'])]
        op.attribs['padding'] = infer.same_padding(upscaled_input=calculated_output_shape,
                                                   filter=op.attribs['size'],
                                                   stride=op.attribs['stride'],
                                                   dilation=op.attribs['dilation'])

    if not op.attribs['output_shape']:
        op.attribs['output_shape'] = calculated_output_shape
def propagate_pool(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    """Shape/dtype propagation for a TF pooling operation."""
    strides = op.attribs['strides']
    # TF encodes padding as a case-insensitive 'SAME'/'VALID' string.
    pad_mode = (infer.Padding.SAME_UPPER
                if op.attribs['padding'].upper() == 'SAME'
                else infer.Padding.VALID)
    shape = infer.sliding_window(input=op.input.shape,
                                 filter=op.attribs['ksize'],
                                 padding=pad_mode,
                                 stride=strides,
                                 dilation=[1] * len(strides))
    return [shape], [op.attribs['T']]
def pooling_shape(op):
    """Return the output shape of a pooling op (NCHW layout assumed by the [:2] slice)."""
    input_shape = op.inputs[0].shape
    if op.attribs['global_pooling']:
        # Global pooling keeps batch and channels, collapsing every spatial dim to 1.
        return input_shape[:2] + [1] * len(input_shape[2:])
    return shapes.sliding_window(input=input_shape,
                                 filter=(1, 1) + op.attribs['kernel_size'],
                                 padding=pairs((0, 0) + op.attribs['pad']),
                                 stride=(1, 1) + op.attribs['stride'],
                                 dilation=[1] * op.inputs[0].rank,
                                 ceil=True)
def propagate_max_unpool(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    """Shape/dtype propagation for ONNX MaxUnpool.

    An explicit output_shape tensor (optional third input) takes precedence;
    otherwise the shape is inferred by inverting the pooling formula.
    """
    data, indices = op.inputs[:2]

    explicit_shape = None
    if len(op.inputs) >= 3 and not op.inputs[2].is_null:
        explicit_shape = evaluate_shape_tensor_simple(op.inputs[2])
    if explicit_shape is not None:
        return [infer.copy(explicit_shape)], [data.dtype]

    kernel = op.attribs['kernel_shape']
    spatial = len(kernel)
    strides = op.attribs.get('strides', [1] * spatial)
    dilations = [1] * spatial
    pads = to_nnef_padding(op.attribs.get('pads', [0] * 2 * spatial))

    # upscale=True inverts the sliding-window mapping (unpooling enlarges).
    shape = infer.sliding_window(input=data.shape,
                                 filter=[1, 1] + kernel,
                                 padding=[(0, 0), (0, 0)] + pads,
                                 stride=[1, 1] + strides,
                                 dilation=[1, 1] + dilations,
                                 upscale=True)
    return [shape], [data.dtype]
def pool_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    """Shape/dtype inference for a Caffe2 pooling op, handling NCHW and NHWC layouts.

    Returns (output_shape, dtype) of the first input after pooling; for
    global pooling, every spatial axis is reduced to 1 (kept, not squeezed).
    """
    is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC'

    if op.attribs['global_pooling']:
        # Spatial axes are 1..rank-2 in NHWC, 2..rank-1 in NCHW.
        spatial_axes = (list(range(1, op.inputs[0].rank - 1)) if is_nhwc
                        else list(range(2, op.inputs[0].rank)))
        return infer.reduce(op.inputs[0].shape,
                            axes=spatial_axes,
                            squeeze=False), op.inputs[0].dtype

    # Renamed the parameter from `list` (original code) to avoid shadowing the builtin.
    def expand(values, default):
        # Pad the per-spatial-axis list with trivial entries for N and C,
        # in the position matching the data layout.
        if is_nhwc:
            return [default] + values + [default]
        return [default, default] + values

    return infer.sliding_window(
        input=op.inputs[0].shape,
        filter=expand(op.attribs['kernels'], 1),
        padding=expand(caffe2_pads_to_nnef_padding(op.attribs['pads']), (0, 0)),
        stride=expand(op.attribs['strides'], 1),
        dilation=expand(op.attribs['dilations'], 1)), op.inputs[0].dtype
def nnef_desample(
        input,  # type: torch.Tensor
        index,  # type: torch.Tensor
        size,  # type: List[int]
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
):
    # type: (...)->torch.Tensor
    """Inverse of NNEF max-pool ("desample"), implemented via torch max_unpool{1,2,3}d.

    Supports only 3D/4D/5D tensors, trivial size/stride/padding in the N and C
    dimensions, and dilation = 1 (all validated below). `border` is accepted but
    never read in this body — NOTE(review): presumably kept for NNEF signature
    compatibility; confirm against the caller.
    """
    # Force the batch entry of a caller-supplied output_shape to match the
    # input's actual batch size; copy first so the caller's list is not mutated.
    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]
    input_shape = list(input.shape)
    rank = len(input_shape)
    spatial_dims = len(input_shape[2:])
    # Supported ranks: 3 (1 spatial dim), 4 (2 spatial dims), 5 (3 spatial dims).
    if len(input_shape) not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Desample is only implemented for 3D, 4D, 5D tensors, given: {}D.".
            format(len(input_shape)))
    # N and C entries of size/padding/stride must be trivial; only the spatial
    # entries are forwarded to torch further down.
    if size and size[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for size = 1 in N and C dimensions.")
    if padding and padding[:2] != [(0, 0), (0, 0)]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for padding = (0, 0) in N and C dimensions."
        )
    if stride and stride[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for stride = 1 in N and C dimensions."
        )
    if dilation and not all(d == 1 for d in dilation):
        raise utils.NNEFToolsException(
            "Desample is only implemented for dilation = 1.")
    # Default stride/dilation to 1 along every axis.
    stride = [1] * rank if not stride else stride
    dilation = [1] * rank if not dilation else dilation
    if not padding:
        # No explicit padding: derive SAME-style padding for the upscaled shape.
        calculated_output_shape = [i * s for i, s in zip(input_shape, stride)]
        padding = shape_inference.same_padding(
            upscaled_input=calculated_output_shape,
            filter=size,
            stride=stride,
            dilation=dilation)
    else:
        # Explicit padding: invert the sliding-window shape formula (upscale=True).
        calculated_output_shape = shape_inference.sliding_window(
            input=input_shape,
            filter=size,
            padding=padding,
            stride=stride,
            dilation=dilation,
            upscale=True)
    output_shape = output_shape if output_shape else calculated_output_shape
    # torch is asked to produce the padded shape; the padding is sliced off after.
    padded_output_shape = [
        s + p + q for s, (p, q) in zip(output_shape, padding)
    ]
    # Dispatch on the number of spatial dims; size/stride lose their N,C entries.
    unpooled = {
        1: F.max_unpool1d,
        2: F.max_unpool2d,
        3: F.max_unpool3d
    }[spatial_dims](input=input,
                    indices=index,
                    kernel_size=size[2:],
                    stride=stride[2:],
                    padding=0,
                    output_size=padded_output_shape)
    # Crop the synthetic padding back out to yield exactly output_shape.
    return nnef_slice(unpooled,
                      axes=list(range(rank)),
                      begin=[p for p, _q in padding],
                      end=[p + s for (p, _q), s in zip(padding, output_shape)])
def test_sliding_window(self):
    """infer.sliding_window: argument validation, plus basic/upscale/dilated/strided cases."""
    # Any empty per-axis list (or empty input) must trip the assertions.
    invalid_calls = [
        dict(input=[1], filter=[], padding=[(1, 1)], stride=[1], dilation=[1]),
        dict(input=[1], filter=[1], padding=[], stride=[1], dilation=[1]),
        dict(input=[1], filter=[1], padding=[(1, 1)], stride=[], dilation=[1]),
        dict(input=[1], filter=[1], padding=[(1, 1)], stride=[1], dilation=[]),
        dict(input=[], filter=[1], padding=[(1, 1)], stride=[1], dilation=[1]),
    ]
    for kwargs in invalid_calls:
        with self.assertRaises(AssertionError):
            infer.sliding_window(**kwargs)

    # Plain 3x3 window, no padding.
    self.assertEqual([10, 30, 30, 3],
                     infer.sliding_window(input=[10, 32, 32, 3],
                                          filter=[1, 3, 3, 1],
                                          padding=[(0, 0)] * 4,
                                          stride=[1, 1, 1, 1],
                                          dilation=[1, 1, 1, 1]))
    # upscale=True inverts the mapping (deconv-style).
    self.assertEqual([10, 32, 32, 3],
                     infer.sliding_window(input=[10, 30, 30, 3],
                                          filter=[1, 3, 3, 1],
                                          padding=[(0, 0)] * 4,
                                          stride=[1, 1, 1, 1],
                                          dilation=[1, 1, 1, 1],
                                          upscale=True))
    # Dilation enlarges the effective filter extent per axis.
    self.assertEqual([10, 28, 26, 3],
                     infer.sliding_window(input=[10, 32, 32, 3],
                                          filter=[1, 3, 3, 1],
                                          padding=[(0, 0)] * 4,
                                          stride=[1, 1, 1, 1],
                                          dilation=[1, 2, 3, 1]))
    # Strided window: floor division by default...
    self.assertEqual([10, 15, 32, 3],
                     infer.sliding_window(input=[10, 32, 32, 3],
                                          filter=[1, 3, 1, 1],
                                          padding=[(0, 0)] * 4,
                                          stride=[1, 2, 1, 1],
                                          dilation=[1, 1, 1, 1]))
    # ...ceil division with ceil=True.
    self.assertEqual([10, 16, 32, 3],
                     infer.sliding_window(input=[10, 32, 32, 3],
                                          filter=[1, 3, 1, 1],
                                          padding=[(0, 0)] * 4,
                                          stride=[1, 2, 1, 1],
                                          dilation=[1, 1, 1, 1],
                                          ceil=True))