Code example #1 (score: 0)
File: test_reduction.py — Project: apple/coremltools
    def test_builder_to_backend_symbolic(self, use_cpu_only, backend):
        """Run argmax/argmin reductions on an input whose leading dim is symbolic."""
        sym_dim = get_new_symbol()

        data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
        placeholders = {"x": mb.placeholder(shape=(sym_dim, 3))}
        feed = {"x": data}

        def build(x):
            # Reduce across columns (axis=1) and across the symbolic rows (axis=0).
            argmax_cols = mb.reduce_argmax(x=x, axis=1, keep_dims=True)
            argmin_rows = mb.reduce_argmin(x=x, axis=0, keep_dims=True)
            return [argmax_cols, argmin_rows]

        # argmax keeps the symbolic row count; argmin collapses rows to 1.
        out_types = [(sym_dim, 1, types.int32), (1, 3, types.int32)]
        out_vals = [
            np.array([[2], [2]], dtype=np.int32),
            np.array([[0, 0, 0]], dtype=np.int32),
        ]

        run_compare_builder(
            build,
            placeholders,
            feed,
            out_types,
            out_vals,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
Code example #2 (score: 0)
File: test_reduction.py — Project: apple/coremltools
    def test_builder_to_backend_global_pool_3d(self, use_cpu_only, backend,
                                               mode):
        """Check that a full spatial reduction lowers via the global_pool path.

        The reduce axes cover all three trailing dims of the rank-5 input,
        listed in mixed positive/negative order to exercise axis
        normalization.
        """
        # test lowering to spatial reduction to global_pool path
        val = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]],
                       dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=val.shape)}
        input_values = {"x": val}

        expected_output_types = (1, 1, 1, 1, 1, types.fp32)

        if mode == "max":
            build = lambda x: mb.reduce_max(
                x=x, axes=[2, -1, 3], keep_dims=True)
            expected_outputs = np.array([[[[[6.0]]]]], dtype=np.float32)
        elif mode == "mean":
            build = lambda x: mb.reduce_mean(
                x=x, axes=[-3, 3, 4], keep_dims=True)
            expected_outputs = np.array([[[[[3.5]]]]], dtype=np.float32)
        else:
            # Fail loudly on an unknown parametrization instead of hitting a
            # NameError on the undefined `build` below; matches the sibling
            # smoke test's explicit NotImplementedError fallback.
            raise NotImplementedError(mode)

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_only,
            backend=backend,
        )
Code example #3 (score: 0)
File: test_reduction.py — Project: aseemw/coremltools
    def test_builder_to_backend_smoke(self, use_cpu_only, backend, mode):
        """Smoke-test each reduction op along axis 1 of a fixed 2x3 input."""
        val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=val.shape)}
        input_values = {"x": val}

        # arg-reductions yield indices (int32); everything else stays fp32.
        if mode in {"argmax", "argmin"}:
            expected_output_types = (2, types.int32)
        else:
            expected_output_types = (2, types.fp32)

        # mode -> (builder function, expected per-row results)
        cases = {
            "argmax": (
                lambda x: mb.reduce_argmax(x=x, axis=1, keep_dims=False),
                np.array([2, 2], dtype=np.int32),
            ),
            "argmin": (
                lambda x: mb.reduce_argmin(x=x, axis=1, keep_dims=False),
                np.array([0, 0], dtype=np.int32),
            ),
            "l1_norm": (
                lambda x: mb.reduce_l1_norm(x=x, axes=[1], keep_dims=False),
                np.array([6.0, 15.0], dtype=np.float32),
            ),
            "l2_norm": (
                lambda x: mb.reduce_l2_norm(x=x, axes=[1], keep_dims=False),
                np.array([3.74165738, 8.77496438], dtype=np.float32),
            ),
            "log_sum": (
                lambda x: mb.reduce_log_sum(x=x, axes=[1], keep_dims=False),
                np.array([1.7917595, 2.70805025], dtype=np.float32),
            ),
            "log_sum_exp": (
                lambda x: mb.reduce_log_sum_exp(x=x, axes=[1], keep_dims=False),
                np.array([3.40760589, 6.40760612], dtype=np.float32),
            ),
            "max": (
                lambda x: mb.reduce_max(x=x, axes=[1], keep_dims=False),
                np.array([3.0, 6.0], dtype=np.float32),
            ),
            "mean": (
                lambda x: mb.reduce_mean(x=x, axes=[1], keep_dims=False),
                np.array([2.0, 5.0], dtype=np.float32),
            ),
            "min": (
                lambda x: mb.reduce_min(x=x, axes=[1], keep_dims=False),
                np.array([1.0, 4.0], dtype=np.float32),
            ),
            "prod": (
                lambda x: mb.reduce_prod(x=x, axes=[1], keep_dims=False),
                np.array([6.0, 120.0], dtype=np.float32),
            ),
            "sum": (
                lambda x: mb.reduce_sum(x=x, axes=[1], keep_dims=False),
                np.array([6.0, 15.0], dtype=np.float32),
            ),
            "sum_square": (
                lambda x: mb.reduce_sum_square(x=x, axes=[1], keep_dims=False),
                np.array([14.0, 77.0], dtype=np.float32),
            ),
        }
        if mode not in cases:
            raise NotImplementedError()
        build, expected_outputs = cases[mode]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
Code example #4 (score: 0)
    def test_builder_to_backend_smoke(self, use_cpu_for_conversion, backend):
        """Round-trip an fp16 constant through constexpr_cast.

        The cast-to-fp32 constant is added to the graph input, and the test
        asserts that no graph pass folds the constexpr_cast op away.
        """
        t = np.arange(4, dtype=np.float32).reshape(4, 1)
        decompressed_constant = np.array(
            [[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)

        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
        input_values = {"x": t}

        def build(x):
            # fp16 source constant, cast back up to fp32 at load time.
            source_val = np.arange(1, 5, dtype=np.float16).reshape(4, 1)
            y = mb.constexpr_cast(source_val=source_val, output_dtype="fp32")
            return mb.add(x=x, y=y)

        expected_output_types = (4, 1, types.fp32)
        expected_outputs = t + decompressed_constant

        mlmodel = run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_for_conversion,
            frontend_only=False,
            backend=backend,
            converter=ct.convert,
            minimum_deployment_target=ct.target.iOS16,
        )

        # validate that the constexpr op is not removed by any graph pass
        prog = mlmodel._mil_program
        assert "constexpr_cast" in get_op_types_in_program(prog)
Code example #5 (score: 0)
    def test_builder_to_backend_smoke(self, use_cpu_for_conversion, backend):
        """Decompress a packed-LUT constant via constexpr_lut_to_dense.

        The decompressed constant is added to the graph input, and the test
        asserts that no graph pass folds the constexpr op away.
        """
        t = np.arange(4, dtype=np.float32).reshape(4, 1)
        decompressed_constant = np.array(
            [[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)

        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
        input_values = {"x": t}

        def build(x):
            # 16-entry lookup table; the two packed index bytes presumably
            # select entries [1.0, 2.0, 3.0, 4.0] — matching the expected
            # constant above (verify against the op's bit-packing spec).
            lut_data = np.array(
                [-19.0, 4.0, 0.0, -1.0, 1.0, 3.0, 5.0, -8.0,
                 19, 13, 42, 4.5, 5.4, 2.0, -6, -7],
                dtype=np.float32)
            indices = np.array([212, 21], dtype=np.uint8)
            shape = np.array([4, 1], dtype=np.uint32)
            y = mb.constexpr_lut_to_dense(lut=lut_data,
                                          indices=indices,
                                          shape=shape)
            return mb.add(x=x, y=y)

        expected_output_types = (4, 1, types.fp32)
        expected_outputs = t + decompressed_constant

        mlmodel = run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_for_conversion,
            frontend_only=False,
            backend=backend,
            converter=ct.convert,
            minimum_deployment_target=ct.target.iOS16,
        )

        # validate that the constexpr op is not removed by any graph pass
        prog = mlmodel._mil_program
        assert "constexpr_lut_to_dense" in get_op_types_in_program(prog)
Code example #6 (score: 0)
    def test_builder_to_backend_smoke(self, use_cpu_for_conversion, backend):
        """Dequantize a uint8 constant via constexpr_affine_dequantize.

        The dequantized constant is added to the graph input, and the test
        asserts that no graph pass folds the constexpr op away.
        """
        t = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
        decompressed_constant = np.array(
            [[[[1.0, 2.0], [3.0, 4.0]]]], dtype=np.float32)

        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
        input_values = {"x": t}

        def build(x):
            # Per-channel params along axis 3; presumably decodes as
            # (q - zero_point) * scale => [[1, 2], [3, 4]] — confirm against
            # the op spec.
            quantized_data = np.array([3, 5, 5, 6],
                                      dtype=np.uint8).reshape(1, 1, 2, 2)
            scale = np.array([1, 2], dtype=np.float32)
            zero_point = np.array([2, 4], dtype=np.uint8)
            y = mb.constexpr_affine_dequantize(
                quantized_data=quantized_data,
                zero_point=zero_point,
                scale=scale,
                axis=3,
            )
            return mb.add(x=x, y=y)

        expected_output_types = (1, 1, 2, 2, types.fp32)
        expected_outputs = t + decompressed_constant

        mlmodel = run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_for_conversion,
            frontend_only=False,
            backend=backend,
            converter=ct.convert,
            minimum_deployment_target=ct.target.iOS16,
        )

        # validate that the constexpr op is not removed by any graph pass
        prog = mlmodel._mil_program
        assert "constexpr_affine_dequantize" in get_op_types_in_program(prog)