Code example #1
0
 def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op,
                                          float32_input_model_relu_ops,
                                          int32_input_model):
     """
     Same test as test_fp16_input_dtype, but with Float32 precision
     """
     mlmodel = ct.convert(
         float32_input_model_add_op,
         inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     # With FLOAT32 precision the fp16 input is cast up before the add.
     assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)

     # NOTE: this was a bare (evaluated-and-discarded) string literal, not a
     # docstring; rewritten as a real comment.
     # Although no FP16ComputePrecision is applied, the float16 input
     # propagates through the network.
     mlmodel = ct.convert(
         float32_input_model_relu_ops,
         inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     assert_ops_in_mil_program(mlmodel,
                               expected_op_list=["cast", "relu", "relu"])
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
Code example #2
0
    def test_color_input(self, rank4_input_model, rank3_input_model):
        # Convert with an RGB image input and verify spec/program dtypes.
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.RGB)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # A rank-3 model cannot take an image input: image inputs need rank 4.
        with pytest.raises(ValueError, match="must have rank 4"):
            mlmodel = ct.convert(
                rank3_input_model,
                inputs=[ct.ImageType(shape=(1, 10, 20), color_layout=ct.colorlayout.RGB)],
                minimum_deployment_target=ct.target.macOS12,
            )
Code example #3
0
    def test_color_output(self, rank4_input_model, rank4_input_model_with_channel_first_output):
        # An RGB/BGR image output must have shape (1, 3, H, W); otherwise convert fails.
        with pytest.raises(ValueError, match="Shape of the RGB/BGR image output,"):
            mlmodel = ct.convert(
                rank4_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                minimum_deployment_target=ct.target.macOS13,
            )

        # BGR image in, RGB image out, targeting macOS13.
        mlmodel = ct.convert(
            rank4_input_model_with_channel_first_output,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # Same image-in/image-out conversion without a deployment target
        # (neural network backend).
        mlmodel = ct.convert(
            rank4_input_model_with_channel_first_output,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        verify_prediction(mlmodel)
Code example #4
0
 def test_linear_model(self, linear_model):
     # Exercises the fuse_matmul_weight_bias pass when both the input and
     # the output are float16.
     mlmodel = ct.convert(
         linear_model,
         inputs=[ct.TensorType(dtype=np.float16)],
         outputs=[ct.TensorType(dtype=np.float16)],
         minimum_deployment_target=ct.target.macOS13,
     )
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp16")
     assert_ops_in_mil_program(mlmodel, ["linear", "relu"])
     verify_prediction(mlmodel)
Code example #5
0
    def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops):
        # With no outputs specified, the model's int32 output dtype is kept.
        mlmodel = ct.convert(
            int32_input_model,
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # Naming an output that does not exist in the model is an error.
        with pytest.raises(Exception):
            # output name does not exist in the model
            mlmodel = ct.convert(
                int32_input_model,
                outputs=["z"],
                minimum_deployment_target=ct.target.macOS12,
            )

        # Two unnamed output types cannot be matched to model outputs.
        with pytest.raises(ValueError, match=", does not have names"):
            mlmodel = ct.convert(
                int32_input_model,
                outputs=[ct.TensorType(dtype=np.float32), ct.TensorType(dtype=np.float32)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # Specifying a shape on an output type is rejected.
        with pytest.raises(ValueError):
            mlmodel = ct.convert(
                int32_input_model,
                outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))],
                minimum_deployment_target=ct.target.macOS12,
            )

        # A user-provided fp32 output dtype is applied (trailing cast added).
        mlmodel = ct.convert(
            int32_input_model,
            outputs=[ct.TensorType(dtype=np.float32)],
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])

        # fp16 outputs require a deployment target of at least iOS16/macOS13.
        with pytest.raises(TypeError,
                           match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"
                           ):
            ct.convert(
                float32_input_model_relu_ops,
                outputs=[ct.TensorType(dtype=np.float16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # With macOS13 the fp16 output dtype is honored.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "relu"])

        # fp16 on both input and output leaves no cast ops in the program.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(dtype=np.float16)],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "relu"])
        verify_prediction(mlmodel)
Code example #6
0
    def test_grayscale_output(self, rank4_grayscale_input_model, rank4_grayscale_input_model_with_channel_first_output):
        # A grayscale image output must have shape (1, 1, H, W).
        with pytest.raises(ValueError, match="Shape of the Grayscale image output,"):
            mlmodel = ct.convert(
                rank4_grayscale_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            )

        # GRAYSCALE_FLOAT16 outputs need a deployment target >= iOS16/macOS13.
        with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(
                rank4_grayscale_input_model_with_channel_first_output,
                outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # Grayscale in, grayscale out on the default backend.
        mlmodel = ct.convert(
            rank4_grayscale_input_model_with_channel_first_output,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        verify_prediction(mlmodel)

        # fp16 grayscale on both ends: program is fp16 end-to-end, no casts.
        mlmodel = ct.convert(
            rank4_grayscale_input_model_with_channel_first_output,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_cast_ops_count(mlmodel, expected_count=0)
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)

        # Mixed case: grayscale (fp32) input with an fp16 grayscale output.
        mlmodel = ct.convert(
            rank4_grayscale_input_model_with_channel_first_output,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)
Code example #7
0
    def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model):
        # Image inputs require a rank-4 model.
        with pytest.raises(ValueError, match="must have rank 4"):
            mlmodel = ct.convert(
                rank3_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                minimum_deployment_target=ct.target.macOS13,
            )

        # invalid shape
        with pytest.raises(ValueError):
            mlmodel = ct.convert(
                rank4_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                minimum_deployment_target=ct.target.macOS13,
            )

        # Valid grayscale input; program stays fp32 with casts at both ends.
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # GRAYSCALE_FLOAT16 inputs need a deployment target >= iOS16/macOS13.
        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(
                rank4_grayscale_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # test that grayscale_16 raises error when used with neural network
        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(
                rank4_grayscale_input_model,
                inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            )

        # fp16 grayscale input plus fp16 tensor output: no cast ops needed.
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        verify_prediction(mlmodel)
Code example #8
0
    def test_color_output(self, rank4_input_model, float32_input_model_add_op):
        # A rank-2 model cannot produce an RGB image output (rank 4 required).
        with pytest.raises(ValueError,
                           match="must have rank 4. Instead it has rank 2"):
            ct.convert(float32_input_model_add_op,
                       inputs=[ct.TensorType(shape=(10, 20))],
                       outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                       minimum_deployment_target=ct.target.macOS13)

        # BGR image in, RGB image out, targeting macOS13.
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.BGR)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # check neural network conversion
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.RGB)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        verify_prediction(mlmodel)
Code example #9
0
    def test_two_input_model(self, float32_two_input_model):
        # Providing only one input type for a two-input model is an error.
        with pytest.raises(ValueError):
            ct.convert(
                float32_two_input_model,
                inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # Force only the first input to int32; the second stays fp32.
        mlmodel = ct.convert(
            float32_two_input_model,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32), ct.TensorType(shape=(10, 20))],
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_input_dtype(mlmodel, expected_type_str="int32", index=0)
        assert_input_dtype(mlmodel, expected_type_str="fp32", index=1)
        assert_output_dtype(mlmodel, expected_type_str="fp32")

        # Force both inputs to int32; the output becomes int32 too.
        mlmodel = ct.convert(
            float32_two_input_model,
            inputs=[
                ct.TensorType(shape=(10, 20), dtype=np.int32),
                ct.TensorType(shape=(10, 20), dtype=np.int32),
            ],
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_input_dtype(mlmodel, expected_type_str="int32", index=0)
        assert_input_dtype(mlmodel, expected_type_str="int32", index=1)
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # Force both inputs to float16 (macOS13); output is cast back to fp32.
        mlmodel = ct.convert(
            float32_two_input_model,
            inputs=[
                ct.TensorType(shape=(10, 20), dtype=np.float16),
                ct.TensorType(shape=(10, 20), dtype=np.float16),
            ],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16", index=0)
        assert_input_dtype(mlmodel, expected_type_str="fp16", index=1)
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Code example #10
0
 def test_classifier(self):
     # Trace a trivial ReLU model and convert it as a 3-class classifier
     # with fp16 input/output on an mlprogram target.
     torch_model = torch.nn.ReLU().eval()
     traced_model = torch.jit.trace(torch_model, torch.rand(3, ))
     model = ct.convert(
         traced_model,
         inputs=[ct.TensorType(shape=(3, ), dtype=np.float16)],
         outputs=[ct.TensorType(dtype=np.float16)],
         classifier_config=ct.ClassifierConfig(['a', 'b', 'c']),
         convert_to='mlprogram',
         minimum_deployment_target=ct.target.macOS13,
     )
     assert_input_dtype(model, expected_type_str="fp16")
     assert_ops_in_mil_program(model, ["relu", "cast", "classify"])

     # Predict and check the classifier outputs: the top label plus a
     # per-class probability dictionary.
     spec = model.get_spec()
     input_name = spec.description.input[0].name
     out_dict = model.predict({input_name: np.array([1.0, 2.0, 3.0])})
     assert 'classLabel' in out_dict
     assert out_dict['classLabel'] == 'c'
     assert len(spec.description.output) == 2
     assert "classLabel_probs" in out_dict
     assert isinstance(out_dict["classLabel_probs"], dict)
Code example #11
0
    def test_fp16_input_dtype(self, float32_input_model_add_op,
                              float32_input_model_relu_ops, int32_input_model):
        """
        Test that providing fp16 input dtype works with macOS13.
        """
        # fp32 add model: fp16 input flows through, output cast back to fp32.
        mlmodel = ct.convert(float32_input_model_add_op,
                             inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        # fp32 relu model: same pattern with two relus.
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["relu", "relu", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        # int32 model also accepts an fp16 input override.
        mlmodel = ct.convert(int32_input_model,
                             inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Code example #12
0
    def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops,
                                             int32_input_model):
        """
        Same test as test_fp16_input_dtype, but with Float32 precision
        """
        # With FLOAT32 precision the fp16 input is cast up before the add.
        mlmodel = ct.convert(
            float32_input_model_add_op,
            inputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
            compute_precision=ct.precision.FLOAT32,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        # Same check for the relu model: cast first, then the two relus.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
            compute_precision=ct.precision.FLOAT32,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "relu"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
Code example #13
0
    def test_grayscale_output(self, rank4_grayscale_input_model):
        # GRAYSCALE_FLOAT16 outputs need a deployment target >= iOS16/macOS13.
        with pytest.raises(TypeError,
                           match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"):
            ct.convert(
                rank4_grayscale_input_model,
                inputs=[ct.TensorType(shape=(1, 1, 10, 20))],
                outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # Grayscale in, grayscale out on the default backend.
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(shape=(1, 1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        verify_prediction(mlmodel)

        # fp16 grayscale on both ends: program is fp16 end-to-end.
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(shape=(1, 1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec,
                                     expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_spec_output_image_type(mlmodel._spec,
                                      expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)

        # Mixed case: grayscale (fp32) input with an fp16 grayscale output.
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[ct.ImageType(shape=(1, 1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec,
                                      expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)
Code example #14
0
    def test_single_output_model(self, int32_input_model,
                                 float32_input_model_relu_ops):
        # With no outputs specified, the default output dtype is float32.
        mlmodel = ct.convert(
            int32_input_model,
            inputs=[ct.TensorType(shape=(10, 20))],
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp32")
        assert_output_dtype(mlmodel, expected_type_str="fp32")

        # A user-provided int32 output dtype is applied during conversion.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20))],
            outputs=[ct.TensorType(dtype=np.int32)],
            minimum_deployment_target=ct.target.macOS12,
        )
        assert_input_dtype(mlmodel, expected_type_str="fp32")
        assert_output_dtype(mlmodel, expected_type_str="int32")
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "relu", "relu", "cast", "cast"])

        # Specifying a shape on an output type is rejected.
        with pytest.raises(ValueError):
            mlmodel = ct.convert(
                int32_input_model,
                inputs=[ct.TensorType(shape=(10, 20))],
                outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))],
                minimum_deployment_target=ct.target.macOS12,
            )

        # fp16 outputs need a deployment target of at least iOS16/macOS13.
        with pytest.raises(TypeError,
                           match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"):
            ct.convert(
                float32_input_model_relu_ops,
                inputs=[ct.TensorType(shape=(10, 20))],
                outputs=[ct.TensorType(dtype=np.float16)],
                minimum_deployment_target=ct.target.macOS12,
            )

        # With macOS13 the fp16 output dtype is honored.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20))],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "relu", "relu"])

        # fp16 on both input and output leaves no cast ops in the program.
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            outputs=[ct.TensorType(dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "relu"])
        verify_prediction(mlmodel)