Example 1
def test_matmul(self):
    # The inferred shape keeps leading batch dimensions and combines the last
    # two dimensions as a matrix product, honouring the transpose flags.
    self.assertEqual([3, 5], infer.matmul([3, 4], [4, 5]))
    self.assertEqual([3, 5], infer.matmul([4, 3], [4, 5], transpose_a=True))
    self.assertEqual([3, 5], infer.matmul([3, 4], [5, 4], transpose_b=True))
    self.assertEqual([10, 20, 3, 5], infer.matmul([10, 20, 4, 3],
                                                  [10, 20, 5, 4],
                                                  transpose_a=True,
                                                  transpose_b=True))
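The assertions above fix the contract of infer.matmul: leading batch dimensions pass through unchanged, the last two dimensions combine as a matrix product, and transpose_a / transpose_b swap the rows and columns of the corresponding operand. A minimal sketch that satisfies these assertions might look as follows; it is written against the test, not taken from the library's actual implementation.

def matmul(a, b, transpose_a=False, transpose_b=False):
    # type: (list, list, bool, bool) -> list
    # Shape-only matmul: validate the operand shapes and return the result shape.
    assert len(a) >= 2 and len(b) >= 2
    assert list(a[:-2]) == list(b[:-2])  # batch dimensions must match

    a_rows, a_cols = (a[-1], a[-2]) if transpose_a else (a[-2], a[-1])
    b_rows, b_cols = (b[-1], b[-2]) if transpose_b else (b[-2], b[-1])
    assert a_cols == b_rows  # inner dimensions must agree

    return list(a[:-2]) + [a_rows, b_cols]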
Example 2
def propagate_matmul(op, const_value_by_tensor):
    # type: (TFOperation, _ConstValueByTensorT)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    # TensorFlow MatMul: the output shape follows from the two input shapes and
    # their transpose attributes; the output dtype comes from the op's 'T' attribute.
    a, b = op.inputs
    return (
        [infer.matmul(a.shape, b.shape,
                      transpose_a=op.attribs["transpose_a"],
                      transpose_b=op.attribs["transpose_b"])],
        [op.attribs['T']]
    )
Example 3
def propagate_gemm(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    # ONNX Gemm treats A and B as matrices: any dimensions beyond the first two
    # must be singleton, and the output shape is that of the (optionally
    # transposed) 2-D matrix product.
    A, B = op.inputs[:2]
    assert all(s == 1 for s in A.shape[2:]) and all(s == 1 for s in B.shape[2:])
    return [
        infer.matmul(a=A.shape[:2],
                     b=B.shape[:2],
                     transpose_a=bool(op.attribs.get('transA', False)),
                     transpose_b=bool(op.attribs.get('transB', False)))
    ], [A.dtype]
Example 4
def matmul_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    # Caffe2 MatMul: each input is flattened to 2-D around its axis attribute
    # before the matrix product; the dtype is taken from the first input.
    assert len(op.inputs) == 2
    A, B = op.inputs
    axis_a = op.attribs.get('axis_a', 1)
    axis_b = op.attribs.get('axis_b', 1)
    trans_a = op.attribs.get('trans_a', 0)
    trans_b = op.attribs.get('trans_b', 0)
    return infer.matmul(flatten_to_2d(A.shape, axis_a), flatten_to_2d(B.shape, axis_b),
                        transpose_a=trans_a, transpose_b=trans_b), \
           op.inputs[0].dtype
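matmul_shape relies on a flatten_to_2d helper that is not part of this example. Assuming the usual Caffe2 convention of collapsing every dimension before the axis into rows and every dimension from the axis onward into columns, a sketch of such a helper could look like this (an assumption for illustration, not the tool's actual code):

from functools import reduce
import operator

def flatten_to_2d(shape, axis):
    # type: (list, int) -> list
    # Assumed behaviour: [2, 3, 4, 5] with axis=1 becomes [2, 60]; axis=2 becomes [6, 20].
    prod = lambda dims: reduce(operator.mul, dims, 1)
    return [prod(shape[:axis]), prod(shape[axis:])]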
Example 5
def propagate_gemm(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    # Variant that accepts higher-rank inputs: everything after the first
    # dimension is flattened into the second, giving 2-D operands for matmul.
    A, B = op.inputs[:2]

    assert A.rank >= 2 and B.rank >= 2
    A_shape = [A.shape[0], utils.product(A.shape[1:])]
    B_shape = [B.shape[0], utils.product(B.shape[1:])]
    return [
        infer.matmul(a=A_shape,
                     b=B_shape,
                     transpose_a=bool(op.attribs.get('transA', False)),
                     transpose_b=bool(op.attribs.get('transB', False)))
    ], [A.dtype]
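To trace the flattening with concrete (made-up) shapes: for A of shape [6, 2, 1, 1] and B of shape [4, 2] with transB set, A_shape becomes [6, 2], B_shape becomes [4, 2], and infer.matmul yields an output shape of [6, 4].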
Example 6
def convert_inner_product(converter, caffe_op, nnef_graph):
    # type: (Converter, CaffeOperation, NNEFGraph)->None

    input, weight = converter.converted_tensors(caffe_op.inputs[:2])
    output = converter.converted_tensor(caffe_op.output)

    # Caffe stores InnerProduct weights as [out, in] unless 'transpose' is set,
    # so the NNEF matmul transposes the weight exactly when Caffe does not.
    transpose_a = False
    transpose_b = not caffe_op.attribs['transpose']

    axis = converter.nnef_axis(caffe_op.attribs["axis"], input.rank)

    # 4-D weights with leading singleton dimensions: keep only the last two dims.
    if weight.rank == 4 and weight.shape[:2] == [1, 1]:
        weight.shape = weight.shape[2:]
        weight.data = weight.data.reshape(weight.shape)

    if axis > 1:
        weight.shape = [1] * (axis - 1) + weight.shape
        weight.data = weight.data.reshape(weight.shape)

    # If the matmul axis is not the last input dimension, flatten the trailing
    # dimensions into one axis first (adding a leading singleton when axis == 0).
    if axis != input.rank - 1:
        reshape_output = NNEFTensor(graph=nnef_graph,
                                    shape=[1] * int(axis == 0) +
                                    input.shape[:axis] +
                                    [utils.product(input.shape[axis:])],
                                    dtype=input.dtype)
        NNEFOperation(graph=nnef_graph,
                      name="reshape",
                      inputs=input,
                      outputs=reshape_output,
                      attribs=dict(shape=[1] * int(axis == 0) +
                                   input.shape[:axis] + [-1]))

        input = reshape_output

    if caffe_op.attribs["bias_term"] and transpose_b and axis == 1:
        bias = converter.converted_tensor(caffe_op.inputs[2])
        assert bias.rank == 1 or (bias.rank == 4
                                  and bias.shape[:3] == [1, 1, 1])
        bias.shape = [1, bias.shape[-1]]
        bias.data = bias.data.reshape(bias.shape)
        if axis > 1:
            bias.shape = [1] * (axis - 1) + bias.shape
            bias.data = bias.data.reshape(bias.shape)

        NNEFOperation(graph=nnef_graph,
                      name="linear",
                      inputs=(input, weight, bias),
                      outputs=output)
    elif caffe_op.attribs["bias_term"]:
        matmul_output = NNEFTensor(graph=nnef_graph,
                                   shape=infer.matmul(a=input.shape,
                                                      b=weight.shape,
                                                      transpose_a=transpose_a,
                                                      transpose_b=transpose_b),
                                   dtype=input.dtype)

        add_output = NNEFTensor(graph=nnef_graph,
                                shape=list(matmul_output.shape),
                                dtype=input.dtype) if axis == 0 else output

        NNEFOperation(graph=nnef_graph,
                      name="matmul",
                      inputs=(input, weight),
                      outputs=matmul_output,
                      attribs=dict(transposeA=transpose_a,
                                   transposeB=transpose_b))

        bias = converter.converted_tensor(caffe_op.inputs[2])
        assert bias.rank == 1 or (bias.rank == 4
                                  and bias.shape[:3] == [1, 1, 1])
        bias.shape = [1, bias.shape[-1]]
        bias.data = bias.data.reshape(bias.shape)
        if axis > 1:
            bias.shape = [1] * (axis - 1) + bias.shape
            bias.data = bias.data.reshape(bias.shape)

        NNEFOperation(graph=nnef_graph,
                      name="add",
                      inputs=(matmul_output, bias),
                      outputs=add_output)
        if axis == 0:
            NNEFOperation(graph=nnef_graph,
                          name="unsqueeze",
                          inputs=add_output,
                          outputs=output,
                          attribs=dict(axes=[0]))
    # No bias term: a plain matmul, again unsqueezing when axis == 0.
    else:
        matmul_output = NNEFTensor(graph=nnef_graph,
                                   shape=infer.matmul(a=input.shape,
                                                      b=weight.shape,
                                                      transpose_a=transpose_a,
                                                      transpose_b=transpose_b),
                                   dtype=input.dtype) if axis == 0 else output

        NNEFOperation(graph=nnef_graph,
                      name="matmul",
                      inputs=(input, weight),
                      outputs=matmul_output,
                      attribs=dict(transposeA=transpose_a,
                                   transposeB=transpose_b))

        if axis == 0:
            NNEFOperation(graph=nnef_graph,
                          name="unsqueeze",
                          inputs=matmul_output,
                          outputs=output,
                          attribs=dict(axes=[0]))