Example #1
    def test_bn(self):  # type: () -> None
        scale = from_array(_random_array((3,)), name="scale")
        bias = from_array(_random_array((3,)), name="bias")
        mean = from_array(_random_array((3,)), name="mean")
        var = from_array(_random_array((3,)), name="var")

        epsilon = 1e-5
        momentum = 0.001

        op_types = ["BatchNormalization", "SpatialBN"]
        for op_type in op_types:
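            # NOTE: the loop variable op_type is not used below; as written,
            # both iterations build a "BatchNormalization" node.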
            _test_single_node(
                "BatchNormalization",
                [(1, 3, 224, 224)],
                [(1, 3, 224, 224)],
                initializer=[scale, bias, mean, var],
                epsilon=epsilon,
                momentum=momentum
            )

            # omit epsilon to exercise the default value
            _test_single_node(
                "BatchNormalization",
                [(1, 3, 224, 224)],
                [(1, 3, 224, 224)],
                initializer=[scale, bias, mean, var],
                # epsilon=epsilon,
                momentum=momentum
            )
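Every example on this page calls the shared test helpers _random_array and _test_single_node, which live in the project's test utilities and are not shown here. The sketch below is a minimal, assumed reconstruction (the names appear in the tests, but this body and the final convert-and-compare step are assumptions rather than the project's actual code): it builds a one-node ONNX model from the given shapes, node attributes, and initializers, which would then be converted and compared numerically against a reference backend.

import numpy as np
from onnx import helper, TensorProto

def _random_array(shape):
    # Random float32 tensor used as test input/weight data.
    return np.random.rand(*shape).astype("float32")

def _test_single_node(op_type, input_shapes, output_shapes,
                      initializer=None, decimal=5, **attrs):
    # Build a one-node ONNX model; attrs become node attributes here, while
    # converter-only options (e.g. minimum_ios_deployment_target) would be
    # split out of attrs by the real helper before this point.
    initializer = initializer or []
    inputs = [helper.make_tensor_value_info("input%d" % i, TensorProto.FLOAT, shape)
              for i, shape in enumerate(input_shapes)]
    outputs = [helper.make_tensor_value_info("output%d" % i, TensorProto.FLOAT, shape)
               for i, shape in enumerate(output_shapes)]
    node = helper.make_node(
        op_type,
        inputs=[v.name for v in inputs] + [t.name for t in initializer],
        outputs=[v.name for v in outputs],
        **attrs)
    graph = helper.make_graph([node], "single_node_test", inputs, outputs,
                              initializer=initializer)
    model = helper.make_model(graph)
    # ...convert the model to Core ML and assert that its outputs match a
    # reference backend to the requested number of decimal places...
    return model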
Example #2
 def test_gemm(self):  # type: () -> None
     input_shape = (1, 2048)
     output_shape = (1, 5)
     W = from_array(_random_array((output_shape[1], input_shape[1])),
                    name="weight")
     b = from_array(_random_array((output_shape[1], )), name="bias")
     _test_single_node("Gemm", [input_shape], [output_shape],
                       initializer=[W, b],
                       decimal=3,
                       transB=1)
Example #3
 def test_gemm_transB_off(self, target_ios='12'):  # type: () -> None
     input_shape = (1, 2048)
     output_shape = (1, 5)
     W = from_array(_random_array((input_shape[1], output_shape[1])),
                    name="weight")
     b = from_array(_random_array((output_shape[1], )), name="bias")
     _test_single_node("Gemm", [input_shape], [output_shape],
                       initializer=[W, b],
                       decimal=3,
                       transB=0,
                       target_ios=target_ios)
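The only difference from the transB=1 test above is the weight layout: with transB=1 the weight initializer is stored as (output_size, input_size) and Gemm computes Y = X * W^T + b, while with transB=0 it is stored as (input_size, output_size) and computes Y = X * W + b. A quick standalone NumPy check of that equivalence (illustration only, not part of the test suite):

import numpy as np

X = np.random.rand(1, 2048).astype("float32")
W = np.random.rand(5, 2048).astype("float32")    # (out, in), the transB=1 layout
b = np.random.rand(5).astype("float32")

y_transB_on = X.dot(W.T) + b                     # Gemm with transB=1
y_transB_off = X.dot(W.T.copy()) + b             # same Gemm with transB=0 and weight (in, out)
np.testing.assert_allclose(y_transB_on, y_transB_off, rtol=1e-6)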
Example #4
    def test_fuse_conv_without_bias(self):  # type: () -> None
        kernel_shape = (3, 2)
        strides = (2, 3)
        pads = (4, 2, 4, 2)
        dilations = (1, 2)
        group = 1
        weight = numpy_helper.from_array(
            _random_array((16, 3, 3, 2)), name="weight"
        )

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, dilations,
                                             kernel_shape, pads, strides)

        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])

        inputs = [('input0', input_shape)]
        outputs = [('output0', output_shape, TensorProto.FLOAT)]

        conv = helper.make_node(
            "Conv",
            inputs=[inputs[0][0], "weight"],
            outputs=["conv_output"],
            dilations=dilations,
            group=group,
            kernel_shape=kernel_shape,
            pads=pads,
            strides=strides
        )

        b = _random_array((int(weight.dims[0]),))
        bias = numpy_helper.from_array(
            b, name="bias"
        )

        add = helper.make_node(
            "Add",
            inputs=[conv.output[0], "bias"],
            outputs=[outputs[0][0]],
            broadcast=1,
            axis=1
        )

        model = _onnx_create_model(
            [conv, add], inputs, outputs, [weight, bias]
        )
        graph_ = Graph.from_onnx(model.graph, onnx_ir_version=5)
        fused_graph = graph_.transformed([ConvAddFuser()])

        self.assertEqual(len(fused_graph.nodes), 1)
        node = fused_graph.nodes[0]
        self.assertEqual(len(node.inputs), 3)
        npt.assert_equal(node.input_tensors[node.inputs[2]], b)
        self.assertEqual(fused_graph.nodes[0].outputs[0], outputs[0][0])
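The fusion check above relies on the fact that a bias-free Conv followed by a per-channel Add is equivalent to a single Conv whose third input is the bias tensor, which is why the fused graph is expected to contain one node with three inputs. It also uses _conv_pool_output_size (as do the Conv tests below) to compute the expected spatial output size; that helper is not shown on this page, and the following is only a minimal sketch assuming the standard floor-division convolution arithmetic with ONNX-style pads (top, left, bottom, right):

def _conv_pool_output_size(input_shape, dilations, kernel_shape, pads, strides):
    # input_shape is (N, C, H, W); returns (H_out, W_out) using
    # out = (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) // stride + 1
    output_height = (
        input_shape[2] + pads[0] + pads[2]
        - (dilations[0] * (kernel_shape[0] - 1) + 1)
    ) // strides[0] + 1
    output_width = (
        input_shape[3] + pads[1] + pads[3]
        - (dilations[1] * (kernel_shape[1] - 1) + 1)
    ) // strides[1] + 1
    return output_height, output_width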
Example #5
 def test_gemm(self,
               minimum_ios_deployment_target='12'):  # type: () -> None
     input_shape = (1, 2048)
     output_shape = (1, 5)
     W = from_array(_random_array((output_shape[1], input_shape[1])),
                    name="weight")
     b = from_array(_random_array((output_shape[1], )), name="bias")
     _test_single_node(
         "Gemm", [input_shape], [output_shape],
         initializer=[W, b],
         decimal=3,
         transB=1,
         minimum_ios_deployment_target=minimum_ios_deployment_target)
Example #6
    def test_conv_transpose(self):  # type: () -> None
        kernel_shape = (3, 3)
        pads = (0, 0, 0, 0)
        C_in  = 3
        C_out = 12
        H_in, W_in = 30, 30
        strides = (2, 2)

        input_shape = (1, C_in, H_in, W_in)
        weight = from_array(_random_array((C_in, C_out, kernel_shape[0], kernel_shape[1])),
                            name="weight")

        H_out = (H_in-1) * strides[0] + kernel_shape[0] - pads[0] - pads[2]
        W_out = (W_in-1) * strides[1] + kernel_shape[1] - pads[1] - pads[3]
        output_shape = (1, C_out, H_out, W_out)

        _test_single_node(
            "ConvTranspose",
            [input_shape],
            [output_shape],
            initializer=[weight],
            # Default values for other attributes: dilations=[1, 1], group=1
            strides=strides,
            kernel_shape=kernel_shape,
            pads=pads,
            output_padding=(0, 0)
        )
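With H_in = W_in = 30, strides of 2, a 3x3 kernel, and no padding or output padding, these formulas give H_out = W_out = (30 - 1) * 2 + 3 = 61, so the expected output shape is (1, 12, 61, 61).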
Example #7
    def test_conv(self):  # type: () -> None
        kernel_shape = (3, 2)
        strides = (2, 3)
        pads = (4, 2, 4, 2)
        dilations = (1, 2)
        group = 1
        weight = from_array(_random_array((16, 3, 3, 2)), name="weight")

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, dilations,
                                             kernel_shape, pads, strides)

        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])

        _test_single_node(
            "Conv",
            [input_shape],
            [output_shape],
            initializer=[weight],
            dilations=dilations,
            group=group,
            kernel_shape=kernel_shape,
            pads=pads,
            strides=strides
        )
Example #8
    def test_dropout_remover(self):  # type: () -> None
        inputs = [('input', (1, 3, 50, 50))]
        outputs = [('out', (1, 5, 50, 50), TensorProto.FLOAT)]
        weight = numpy_helper.from_array(_random_array((5, 3, 1, 1)),
                                         name="weight")
        conv = helper.make_node("Conv",
                                inputs=["input", "weight"],
                                outputs=["conv_output"],
                                kernel_shape=(1, 1),
                                strides=(1, 1))
        drop = helper.make_node(
            "Dropout",
            inputs=["conv_output"],
            outputs=["drop_output"],
        )
        exp = helper.make_node("Exp", inputs=["drop_output"], outputs=['out'])

        onnx_model = _onnx_create_model([conv, drop, exp], inputs, outputs)

        graph = Graph.from_onnx(onnx_model.graph)
        new_graph = graph.transformed([DropoutRemover()])
        self.assertEqual(len(graph.nodes), 3)
        self.assertEqual(len(new_graph.nodes), 2)
        self.assertEqual(new_graph.nodes[0].inputs[0], 'input')
        self.assertEqual(new_graph.nodes[1].inputs[0],
                         new_graph.nodes[0].outputs[0])
        self.assertEqual(new_graph.nodes[1].outputs[0], 'out')
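At inference time Dropout is an identity, so removing it only requires rewiring each consumer of the Dropout's output to read the Dropout's input instead; the assertions above verify exactly that by checking that the Exp node now consumes the Conv output directly. A standalone sketch of the same idea applied directly to an ONNX GraphProto (an illustration, not the DropoutRemover transform used in the test):

def remove_dropout_nodes(graph):
    # graph is an onnx GraphProto, modified in place.
    for node in [n for n in graph.node if n.op_type == "Dropout"]:
        # Rewire every consumer of the Dropout output to its input.
        for consumer in graph.node:
            for i, name in enumerate(consumer.input):
                if name == node.output[0]:
                    consumer.input[i] = node.input[0]
        # If the Dropout fed a graph output directly, rename that output.
        for graph_output in graph.output:
            if graph_output.name == node.output[0]:
                graph_output.name = node.input[0]
        graph.node.remove(node)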
Example #9
 def test_gemm_transB_off(self, disable_rank5_mapping=False):  # type: () -> None
     input_shape = (1, 2048)
     output_shape = (1, 5)
     W = from_array(
         _random_array((input_shape[1], output_shape[1])), name="weight"
     )
     b = from_array(
         _random_array((output_shape[1],)), name="bias"
     )
     _test_single_node(
         "Gemm",
         [input_shape],
         [output_shape],
         initializer=[W, b],
         decimal=3,
         transB=0,
         disable_rank5_mapping=disable_rank5_mapping
     )
Example #10
    def skip_test_lstm(self):  # type: () -> None
        x = 4
        h = 2
        seq_length = 3
        W = from_array(_random_array((4 * h, x)), name="gate_weights")
        R = from_array(_random_array((4 * h, h)), name="recursion_weights")
        B = from_array(_random_array((8 * h, )), name="biases")
        seq_lens_input = from_array(np.array([seq_length]).astype(np.int32),
                                    name='seq_lens_input')
        initial_h = from_array(np.zeros((1, 1, h)).astype(np.float32),
                               name='initial_h')
        initial_c = from_array(np.zeros((1, 1, h)).astype(np.float32),
                               name='initial_c')

        input_shape = (seq_length, 1, x)
        output_shape_all = (seq_length, 1, h)
        output_shape_last = (1, 1, h)

        onnx_model = _onnx_create_single_node_model(
            "LSTM", [input_shape], [output_shape_all, output_shape_last],
            initializer=[W, R, B, seq_lens_input, initial_h, initial_c],
            hidden_size=h)
        X = np.random.rand(*input_shape).astype("float32")  # type: ignore
        prepared_backend = caffe2.python.onnx.backend.prepare(onnx_model)
        out = prepared_backend.run({'input0': X})
        caffe2_out_all = out['output0']
        caffe2_out_last = out['output1']

        coreml_model = convert(onnx_model)
        inputdict = {}
        inputdict['input0'] = X
        inputdict['initial_h'] = np.zeros((h), dtype=np.float32)
        inputdict['initial_c'] = np.zeros((h), dtype=np.float32)
        coreml_out_dict = coreml_model.predict(inputdict, useCPUOnly=True)
        coreml_out_all = coreml_out_dict['output0']
        coreml_out_last = coreml_out_dict['output1']

        _assert_outputs(caffe2_out_all.flatten(),
                        coreml_out_all.flatten(),
                        decimal=5)
        _assert_outputs(caffe2_out_last.flatten(),
                        coreml_out_last.flatten(),
                        decimal=5)
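The initializer shapes follow the ONNX LSTM convention: the input and recurrence weight matrices stack the four gates, hence 4 * h rows, and the bias concatenates the input and recurrence biases, hence 8 * h entries; seq_lens_input, initial_h, and initial_c populate the operator's optional sequence-length and initial-state inputs.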
Example #11
    def test_create_graph(self):  # type: () -> None
        kernel_shape = (3, 2)
        strides = (2, 3)
        pads = (4, 2, 4, 2)
        dilations = (1, 2)
        group = 1
        weight = numpy_helper.from_array(
            _random_array((16, 3, 3, 2)), name="weight"
        )

        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(input_shape, dilations,
                                             kernel_shape, pads, strides)

        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])

        inputs = [('input0', input_shape)]
        outputs = [('output0', output_shape)]

        conv = helper.make_node(
            "Conv",
            inputs=[inputs[0][0], "weight"],
            outputs=["conv_output"],
            dilations=dilations,
            group=group,
            kernel_shape=kernel_shape,
            pads=pads,
            strides=strides
        )

        relu = helper.make_node(
            "Relu",
            inputs=[conv.output[0]],
            outputs=[outputs[0][0]]
        )

        model = _onnx_create_model([conv, relu], inputs, outputs, [weight])
        graph_ = Graph.from_onnx(model.graph)
        self.assertTrue(len(graph_.inputs) == 1)
        self.assertEqual(graph_.inputs[0][2], input_shape)
        self.assertTrue(len(graph_.outputs) == 1)
        self.assertEqual(graph_.outputs[0][2], output_shape)
        self.assertTrue(len(graph_.nodes) == 2)
        self.assertEqual(len(graph_.nodes[0].parents), 0)
        self.assertEqual(len(graph_.nodes[1].parents), 1)
        self.assertEqual(len(graph_.nodes[0].children), 1)
        self.assertEqual(len(graph_.nodes[1].children), 0)
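The final assertions document the Graph bookkeeping that the converter relies on: graph_.inputs and graph_.outputs hold per-tensor tuples whose third element is the shape, and every node records its parents and children, which is why the Conv node has one child (the Relu) and the Relu has one parent and no children.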