Example #1
    def test_multi_output_model(self, float32_two_output_model):
        # check that an error is raised when only 1 output is provided
        with pytest.raises(
                ValueError,
                match="Number of outputs provided, 1, "
                "do not match the number of outputs detected in the model, 2"):
            ct.convert(float32_two_output_model,
                       inputs=[ct.TensorType(shape=(10, 20))],
                       outputs=[ct.TensorType()],
                       minimum_deployment_target=ct.target.macOS12)

        # set 1 output to float16 and the other to float32
        mlmodel = ct.convert(
            float32_two_output_model,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            outputs=[
                ct.TensorType(name="out1", dtype=np.float16),
                ct.TensorType(name="out2", dtype=np.float32)
            ],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_cast_ops_count(mlmodel, expected_count=1)
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp16",
                            expected_name="out1",
                            index=0)
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="out2",
                            index=1)
        verify_prediction(mlmodel)
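
These tests come from the coremltools converter test suite; the pytest fixtures they take (e.g. float32_two_output_model) are defined elsewhere and not shown on this page. As a rough sketch only, such a fixture might build a small TF2 graph and hand its concrete function to ct.convert (the shapes, the ops, and the list-of-concrete-functions form are assumptions, not the actual fixture):

import pytest
import tensorflow as tf

@pytest.fixture
def float32_two_output_model():
    # hypothetical stand-in: one float32 input, two float32 outputs
    @tf.function(input_signature=[tf.TensorSpec(shape=(10, 20), dtype=tf.float32)])
    def model(x):
        y = tf.nn.relu(x)
        return y + 1.0, y - 1.0

    # ct.convert accepts a list containing a TF concrete function
    return [model.get_concrete_function()]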
Example #2
    def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op,
                                             float32_input_model_relu_ops,
                                             int32_input_model):
        """
        Same test as test_fp16_input_dtype, but with float32 compute precision.
        """
        mlmodel = ct.convert(
            float32_input_model_add_op,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
            compute_precision=ct.precision.FLOAT32,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        # Although no FP16ComputePrecision is applied, the float16 input
        # propagates through the network.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
            compute_precision=ct.precision.FLOAT32,
        )
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "relu", "relu"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
Example #3
    def test_input_dtype_user_provided(self, int32_input_model):
        # test that the dtype provided in the API overrides the input dtype in the TF model
        mlmodel = ct.convert(int32_input_model,
                             inputs=[ct.TensorType(dtype=np.float32)],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="fp32")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Example #4
    def test_input_dtype_user_provided(self, float32_input_model_add_op):
        # test that the dtype provided in the API is applied
        mlmodel = ct.convert(
            float32_input_model_add_op,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)],
            minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="int32")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Example #5
    def test_linear_model(self, linear_model):
        # this tests the fuse_matmul_weight_bias pass when the inputs are of type float16
        mlmodel = ct.convert(linear_model,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        assert_ops_in_mil_program(mlmodel, ["linear", "relu"])
        verify_prediction(mlmodel)
Example #6
    def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops):
        # test the default output type (inherited from the model)
        mlmodel = ct.convert(int32_input_model,
                             minimum_deployment_target=ct.target.macOS12)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # test that an error is raised when an output of unknown name is provided
        with pytest.raises(Exception):
            # output name does not exist in the model
            mlmodel = ct.convert(int32_input_model,
                                 outputs=["z"],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that an error is raised when two outputs are provided without names
        with pytest.raises(ValueError, match=", does not have names"):
            mlmodel = ct.convert(int32_input_model,
                                 outputs=[ct.TensorType(dtype=np.float32), ct.TensorType(dtype=np.float32)],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that an error is raised when shape is provided for the output
        with pytest.raises(ValueError):
            mlmodel = ct.convert(int32_input_model,
                                 outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that the output dtype provided by the user is applied during conversion
        mlmodel = ct.convert(int32_input_model,
                             outputs=[ct.TensorType(dtype=np.float32)],
                             minimum_deployment_target=ct.target.macOS12)
        assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])

        # test that output dtype of float16 is rejected when deployment target is low
        with pytest.raises(TypeError,
                           match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"
                           ):
            ct.convert(float32_input_model_relu_ops,
                       outputs=[ct.TensorType(dtype=np.float16)],
                       minimum_deployment_target=ct.target.macOS12,
                       )

        # test that output type float16 is applied correctly
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "relu"])

        # test that input and output types float16 are applied correctly
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "relu"])
        verify_prediction(mlmodel)
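
The assertion helpers used throughout (assert_ops_in_mil_program and friends) are also defined outside this page. A minimal sketch of what assert_ops_in_mil_program might do, assuming the private _mil_program attribute these tests already rely on:

def assert_ops_in_mil_program(mlmodel, expected_op_list):
    # collect the op types of the "main" MIL function, in program order
    ops = [op.op_type for op in mlmodel._mil_program.functions["main"].operations]
    assert ops == expected_op_list, f"expected {expected_op_list}, got {ops}"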
Example #7
    def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model):
        with pytest.raises(ValueError, match="must have rank 4"):
            mlmodel = ct.convert(rank3_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        # invalid shape for a grayscale image input
        with pytest.raises(ValueError):
            mlmodel = ct.convert(rank4_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        mlmodel = ct.convert(rank4_grayscale_input_model,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(rank4_grayscale_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                                 minimum_deployment_target=ct.target.macOS12,
                                 )

        # test that GRAYSCALE_FLOAT16 raises an error when used with the neuralnetwork backend
        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(rank4_grayscale_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                                 )

        mlmodel = ct.convert(rank4_grayscale_input_model,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        verify_prediction(mlmodel)
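
Note that for image inputs, predictions are fed PIL images rather than NumPy arrays. A hedged usage sketch for the grayscale model converted above (the input name "input" and the 10x20 size are assumptions):

import numpy as np
from PIL import Image

arr = np.random.randint(0, 256, size=(10, 20), dtype=np.uint8)
img = Image.fromarray(arr, mode="L")  # "L" = 8-bit grayscale
out = mlmodel.predict({"input": img})  # raises if the model cannot run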
Example #8
    def test_output_name_specified_by_user(self, float32_input_model_relu_ops,
                                           float32_two_output_model):
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")],
            outputs=[ct.TensorType(name="custom_output_name")],
            minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel,
                           expected_type_str="fp32",
                           expected_name="custom_input_name")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output_name")

        mlmodel = ct.convert(
            float32_two_output_model,
            inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")],
            outputs=[
                ct.TensorType(name="custom_output1_name"),
                ct.TensorType(name="custom_output2_name")
            ],
            minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel,
                           expected_type_str="fp32",
                           expected_name="custom_input_name")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output1_name",
                            index=0)
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output2_name",
                            index=1)
Example #9
    def test_fp16_input_dtype(self, float32_input_model_add_op,
                              float32_input_model_relu_ops, int32_input_model):
        """
        Test that providing fp16 input dtype works with macOS13.
        """
        mlmodel = ct.convert(
            float32_input_model_add_op,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["relu", "relu", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(
            int32_input_model,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
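
assert_input_dtype and assert_output_dtype presumably check the feature descriptions in the Core ML spec. A sketch under that assumption (the dtype-string mapping and the multiArrayType access are illustrative, not the test suite's actual helpers):

from coremltools.proto import FeatureTypes_pb2 as ft

# hypothetical mapping from the tests' dtype strings to spec enum values
_DTYPES = {
    "fp16": ft.ArrayFeatureType.FLOAT16,
    "fp32": ft.ArrayFeatureType.FLOAT32,
    "int32": ft.ArrayFeatureType.INT32,
}

def assert_input_dtype(mlmodel, expected_type_str, expected_name=None, index=0):
    desc = mlmodel._spec.description.input[index]
    assert desc.type.multiArrayType.dataType == _DTYPES[expected_type_str]
    if expected_name is not None:
        assert desc.name == expected_name

def assert_output_dtype(mlmodel, expected_type_str, expected_name=None, index=0):
    desc = mlmodel._spec.description.output[index]
    assert desc.type.multiArrayType.dataType == _DTYPES[expected_type_str]
    if expected_name is not None:
        assert desc.name == expected_name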
Example #10
    def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops,
                                             int32_input_model):
        """
        Same test as test_fp16_input_dtype, but with Float32 precision
        """
        mlmodel = ct.convert(float32_input_model_add_op,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             compute_precision=ct.precision.FLOAT32,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(float32_input_model_relu_ops,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             compute_precision=ct.precision.FLOAT32,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "relu"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
Example #11
    def test_two_input_model(self, float32_two_input_model):
        # test that an error is raised if only 1 input is provided
        with pytest.raises(ValueError):
            ct.convert(float32_two_input_model,
                       inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)],
                       minimum_deployment_target=ct.target.macOS12)

        # test forcing the 1st input to be int32
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[
                                 ct.TensorType(shape=(10, 20), dtype=np.int32),
                                 ct.TensorType(shape=(10, 20))
                             ],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="int32", index=0)
        assert_input_dtype(mlmodel, expected_type_str="fp32", index=1)
        assert_output_dtype(mlmodel, expected_type_str="fp32")

        # test forcing both inputs to be int32
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[
                                 ct.TensorType(shape=(10, 20), dtype=np.int32),
                                 ct.TensorType(shape=(10, 20), dtype=np.int32),
                             ],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="int32", index=0)
        assert_input_dtype(mlmodel, expected_type_str="int32", index=1)
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # test forcing both inputs to be float16
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[
                                 ct.TensorType(shape=(10, 20),
                                               dtype=np.float16),
                                 ct.TensorType(shape=(10, 20),
                                               dtype=np.float16),
                             ],
                             minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16", index=0)
        assert_input_dtype(mlmodel, expected_type_str="fp16", index=1)
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Example #12
    def test_multi_output_model(self, float32_two_output_model):
        # check that an error is raised when only 1 unnamed output is provided
        with pytest.raises(ValueError, match="please provide names for each of the outputs"):
            mlmodel = ct.convert(float32_two_output_model,
                                 outputs=[ct.TensorType(dtype=np.float16)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        # check that error is raised when multiple outputs are provided without names
        with pytest.raises(ValueError, match="please provide names for each of the outputs"):
            mlmodel = ct.convert(float32_two_output_model,
                                 outputs=[ct.TensorType(dtype=np.float16), ct.TensorType(dtype=np.float32)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        # set 1 output to float16 and the other to float32
        output1_name = "Identity" if _HAS_TF_2 else "output1"
        output2_name = "Identity_1" if _HAS_TF_2 else "output2"
        mlmodel = ct.convert(float32_two_output_model,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             outputs=[ct.TensorType(name=output2_name, dtype=np.float16),
                                      ct.TensorType(name=output1_name, dtype=np.float32)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_cast_ops_count(mlmodel, expected_count=1)
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0)
        assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name=output1_name, index=1)
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        verify_prediction(mlmodel)

        # when a single named output is provided, only that output is selected
        mlmodel = ct.convert(float32_two_output_model,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             outputs=[ct.TensorType(name=output2_name, dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_cast_ops_count(mlmodel, expected_count=0)
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0)
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        verify_prediction(mlmodel)
Example #13
    def test_two_input_model(self, float32_two_input_model):
        # test forcing input type of "input1" to be int32
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[ct.TensorType(name="input1", dtype=np.int32)],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1")
        assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="input2")
        assert_output_dtype(mlmodel, expected_type_str="fp32")

        # test forcing both inputs to be int32
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[ct.TensorType(name="input1", dtype=np.int32),
                                     ct.TensorType(name="input2", dtype=np.int32),
                                     ],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1")
        assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input2")
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # if names are not provided, an error should be raised
        with pytest.raises(ValueError):
            mlmodel = ct.convert(float32_two_input_model,
                                 inputs=[ct.TensorType(dtype=np.int32),
                                         ct.TensorType(dtype=np.int32),
                                         ],
                                 minimum_deployment_target=ct.target.macOS12)

        # test forcing both inputs to be float16
        mlmodel = ct.convert(float32_two_input_model,
                             inputs=[ct.TensorType(name="input1", dtype=np.float16),
                                     ct.TensorType(name="input2", dtype=np.float16),
                                     ],
                             minimum_deployment_target=ct.target.macOS13)
        assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input1")
        assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input2")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        assert_cast_ops_count(mlmodel, expected_count=1)
        verify_prediction(mlmodel)
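
assert_cast_ops_count, used above to confirm how many casts survive conversion, could be written the same way as the op-list helper; a sketch, again assuming the _mil_program attribute:

def assert_cast_ops_count(mlmodel, expected_count):
    # count the "cast" ops in the main function of the MIL program
    casts = [op for op in mlmodel._mil_program.functions["main"].operations
             if op.op_type == "cast"]
    assert len(casts) == expected_count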
Example #14
    def test_single_output_model(self, int32_input_model,
                                 float32_input_model_relu_ops):
        # test output type: if not provided, it should default to float32
        mlmodel = ct.convert(int32_input_model,
                             inputs=[ct.TensorType(shape=(10, 20))],
                             minimum_deployment_target=ct.target.macOS12)
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp32")
        assert_output_dtype(mlmodel, expected_type_str="fp32")

        # test that the output dtype provided by the user is applied during conversion
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             inputs=[ct.TensorType(shape=(10, 20))],
                             outputs=[ct.TensorType(dtype=np.int32)],
                             minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel, expected_type_str="fp32")
        assert_output_dtype(mlmodel, expected_type_str="int32")
        assert_ops_in_mil_program(
            mlmodel, expected_op_list=["cast", "relu", "relu", "cast", "cast"])

        # test that an error is raised when shape is provided for the output
        with pytest.raises(ValueError):
            mlmodel = ct.convert(
                int32_input_model,
                inputs=[ct.TensorType(shape=(10, 20))],
                outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))],
                minimum_deployment_target=ct.target.macOS12)

        # test that output dtype of float16 is rejected when deployment target is low
        with pytest.raises(
                TypeError,
                match=
                "float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"
        ):
            ct.convert(
                float32_input_model_relu_ops,
                inputs=[ct.TensorType(shape=(10, 20))],
                outputs=[ct.TensorType(dtype=np.float16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # test that output type float16 is applied correctly
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20))],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "relu", "relu"])

        # test that input and output types float16 are applied correctly
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "relu"])
        verify_prediction(mlmodel)
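
Finally, verify_prediction only needs to confirm that the compiled model runs end to end. A minimal sketch for float multiarray inputs (image and int32 inputs would need their own handling; the random-data approach is an assumption):

import numpy as np

def verify_prediction(mlmodel):
    # build a random float input for every multiarray feature in the spec
    inputs = {}
    for inp in mlmodel._spec.description.input:
        shape = tuple(inp.type.multiArrayType.shape)
        inputs[inp.name] = np.random.rand(*shape).astype(np.float32)
    mlmodel.predict(inputs)  # raises if the model cannot execute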