Example 1
def test_reduce(self):
    # Reduced axes collapse to extent 1 by default (keepdims semantics)...
    self.assertEqual([1, 1, 1, 1, 5],
                     infer.reduce([1, 2, 3, 4, 5], [1, 2, 3]))
    # ...and are removed from the shape entirely when squeeze=True.
    self.assertEqual([1, 5],
                     infer.reduce([1, 2, 3, 4, 5], [1, 2, 3],
                                  squeeze=True))
    # Negative axes count from the end, as in numpy.
    self.assertEqual([], infer.reduce([5], [-1], squeeze=True))
    self.assertEqual([1, 6], infer.reduce([5, 6], [-2]))
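Taken together, these assertions pin down the contract of infer.reduce: reduced axes collapse to extent 1 when squeeze is false, disappear from the shape when it is true, and negative axes count from the end as in numpy. A minimal reference sketch consistent with the assertions above (an illustration, not the library's actual implementation):

import typing

def reduce(input, axes, squeeze=False):
    # type: (typing.List[int], typing.List[int], bool)->typing.List[int]
    # Normalize negative axes, e.g. -1 on a rank-1 shape becomes 0.
    axes = [axis + len(input) if axis < 0 else axis for axis in axes]
    if squeeze:
        # Drop the reduced dimensions from the shape entirely.
        return [dim for i, dim in enumerate(input) if i not in axes]
    # Keep the reduced dimensions, collapsed to extent 1.
    return [1 if i in axes else dim for i, dim in enumerate(input)]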
Example 2
def propagate_reduce(
        op,  # type: ONNXOperation
        multi_axis,  # type: bool
        default=None,  # type: typing.Any
        dtype=None  # type: typing.Optional[str]
):
    # type: (...)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    if default is None:
        default = list(range(op.input.rank))

    if multi_axis:
        if 'axes' in op.attribs:
            axes = list(op.attribs['axes'])
        else:
            axes = list(default)
    else:
        if 'axis' in op.attribs:
            axes = [op.attribs['axis']]
        else:
            axes = list(default)

    return [
        infer.reduce(input=op.input.shape,
                     axes=axes,
                     squeeze=not op.attribs.get('keepdims', 1))
    ], [dtype if dtype is not None else op.input.dtype]
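A hypothetical invocation of the ONNX variant above, with _Tensor and _Op as minimal stand-ins (both invented here for illustration) for the real ONNXOperation and tensor types:

class _Tensor(object):
    def __init__(self, shape, dtype):
        self.shape, self.dtype = shape, dtype

    @property
    def rank(self):
        return len(self.shape)

class _Op(object):
    def __init__(self, input, attribs):
        self.input, self.attribs = input, attribs

# keepdims=0 turns keepdims semantics off, so axes 1 and 2 are squeezed away.
op = _Op(_Tensor([2, 3, 4], 'float32'), {'axes': [1, 2], 'keepdims': 0})
assert propagate_reduce(op, multi_axis=True) == ([[2]], ['float32'])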
Example 3
def propagate_reduce(op, const_value_by_tensor, dtype=None):
    # type: (TFOperation, _ConstValueByTensorT, typing.Optional[str])->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, axis = op.inputs
    axis = const_value_by_tensor[axis].tolist()  # type: typing.List[int]
    if not isinstance(axis, list):
        axis = [axis]
    return [infer.reduce(input=input.shape, axes=axis, squeeze=not op.attribs["keep_dims"])], \
           [op.attribs['T'] if not dtype else dtype]
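In the TensorFlow variant the reduction axes arrive as a second (constant) input tensor rather than an attribute; const_value_by_tensor maps already-evaluated tensors to their numpy values. The isinstance check is needed because numpy's tolist() on a 0-d array returns a bare scalar rather than a list:

import numpy as np

assert np.array(1).tolist() == 1            # 0-d array -> plain int
assert np.array([0, 2]).tolist() == [0, 2]  # 1-d array -> list of ints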
Example 4
def sum_reduce_like_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    input, reference = op.inputs

    axis = op.attribs['axis']
    # Target shape: None marks dims outside the reference window starting at
    # `axis` (these get squeezed away); dims inside the window are clipped to
    # the reference's extents.
    target = [
        None if i < axis or i >= axis + reference.rank else min(
            input.shape[i], reference.shape[i - axis])
        for i in range(input.rank)
    ]
    # Dims that must collapse to 1 but stay in place...
    axes_keep = [
        i for i in range(input.rank) if target[i] == 1 and input.shape[i] != 1
    ]
    # ...and dims that disappear from the result entirely.
    axes_squeeze = [i for i in range(input.rank) if target[i] is None]

    tmp_shape = infer.reduce(input.shape, axes_keep, squeeze=False)
    shape = infer.reduce(tmp_shape, axes_squeeze, squeeze=True)

    return shape, input.dtype
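A worked example with hypothetical shapes: reducing an input of shape [2, 3, 4, 5] to be "like" a reference of shape [3, 4] placed at axis=1:

# target       == [None, 3, 4, None]  (dims 0 and 3 fall outside the window)
# axes_keep    == []                  (no surviving dim needs to collapse to 1)
# axes_squeeze == [0, 3]
# result: infer.reduce([2, 3, 4, 5], [0, 3], squeeze=True) == [3, 4]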
Example 5
def pool_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC'

    if op.attribs['global_pooling']:
        # Global pooling reduces all spatial axes but keeps them as 1s.
        return infer.reduce(op.inputs[0].shape,
                            axes=list(range(1, op.inputs[0].rank - 1))
                            if is_nhwc else list(range(2, op.inputs[0].rank)),
                            squeeze=False), op.inputs[0].dtype

    def expand(items, default):
        # Pad the per-spatial-dim attribute out to full rank: the batch and
        # channel positions get the neutral `default` value.
        if is_nhwc:
            return [default] + items + [default]
        else:
            return [default, default] + items

    return infer.sliding_window(
        input=op.inputs[0].shape,
        filter=expand(op.attribs['kernels'], 1),
        padding=expand(caffe2_pads_to_nnef_padding(op.attribs['pads']),
                       (0, 0)),
        stride=expand(op.attribs['strides'], 1),
        dilation=expand(op.attribs['dilations'], 1)), op.inputs[0].dtype
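For the global-pooling branch, a worked example with hypothetical values: an NHWC input of shape [1, 7, 7, 64] keeps its reduced spatial axes as 1s, while the same tensor in NCHW layout reduces axes 2 and 3 instead:

# infer.reduce([1, 7, 7, 64], axes=[1, 2], squeeze=False) == [1, 1, 1, 64]
# infer.reduce([1, 64, 7, 7], axes=[2, 3], squeeze=False) == [1, 64, 1, 1]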
Example 6
def propagate_argminmax(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, axis = op.inputs
    axis = const_value_by_tensor[axis].tolist()  # type: int
    return [infer.reduce(input=input.shape, axes=[axis], squeeze=True)], [op.attribs['T']]
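Unlike the general reductions, argmin/argmax removes exactly one axis, so the reduced dimension is unconditionally squeezed. With hypothetical values:

# axis == 1 on an input of shape [2, 3, 4]:
# infer.reduce([2, 3, 4], axes=[1], squeeze=True) == [2, 4]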
Example 7
def propagate_reduce(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    input, axis = op.inputs
    axis = const_value_by_tensor[axis].tolist()  # type: typing.List[int]
    return [infer.reduce(input=input.shape, axes=axis, squeeze=not op.attribs["keep_dims"])], [op.attribs['T']]
Example 8
def reduce_shape(op, dtype=None):
    # type: (Caffe2Operation, typing.Optional[str])->ShapeResult
    axes = op.attribs['axes']
    keep_dims = op.attribs['keepdims']
    return infer.reduce(op.inputs[0].shape, axes=axes, squeeze=not keep_dims), \
           op.inputs[0].dtype if dtype is None else dtype
Example 9
def arg_min_max_shape(op, dtype=None):
    # type: (Caffe2Operation, typing.Optional[str])->ShapeResult
    axis = op.attribs.get('axis', -1)
    keep_dims = op.attribs.get('keepdims', 1)
    return infer.reduce(op.inputs[0].shape, axes=[axis], squeeze=not keep_dims), \
           op.inputs[0].dtype if dtype is None else dtype
Example 10
def reduction_shape(op):
    axis = op.attribs['axis']
    rank = len(op.inputs[0].shape)
    axes = list(range(axis + rank if axis < 0 else axis, rank))
    return shapes.reduce(op.inputs[0].shape, axes=axes, squeeze=True)
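This last variant reduces every axis from `axis` through the last one, normalizing a negative axis first. A worked example with hypothetical values:

# axis == -2 on an input of shape [2, 3, 4]:
#   axes == list(range(1, 3)) == [1, 2]
#   shapes.reduce([2, 3, 4], axes=[1, 2], squeeze=True) == [2]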