Example #1
    def test_output_name_specified_by_user(self, float32_input_model_relu_ops,
                                           float32_two_output_model):
        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")],
            outputs=[ct.TensorType(name="custom_output_name")],
            minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel,
                           expected_type_str="fp32",
                           expected_name="custom_input_name")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output_name")

        mlmodel = ct.convert(
            float32_two_output_model,
            inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")],
            outputs=[
                ct.TensorType(name="custom_output1_name"),
                ct.TensorType(name="custom_output2_name")
            ],
            minimum_deployment_target=ct.target.macOS12)
        assert_input_dtype(mlmodel,
                           expected_type_str="fp32",
                           expected_name="custom_input_name")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output1_name",
                            index=0)
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="custom_output2_name",
                            index=1)
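
The assert_input_dtype, assert_output_dtype, and related helpers (assert_ops_in_mil_program, assert_cast_ops_count, verify_prediction) used throughout these examples come from coremltools' internal test utilities and are not shown here. A rough, hedged sketch of the two dtype checks (assuming the standard MLModel spec layout) might look like this:

from coremltools.proto import FeatureTypes_pb2 as ft

# Hypothetical re-implementation of the dtype assertion helpers used in these
# examples; the real versions live in coremltools' internal test utilities.
_DTYPE_TO_PROTO = {
    "fp16": ft.ArrayFeatureType.FLOAT16,
    "fp32": ft.ArrayFeatureType.FLOAT32,
    "int32": ft.ArrayFeatureType.INT32,
}

def assert_input_dtype(mlmodel, expected_type_str, expected_name=None, index=0):
    # Inspect the input feature description in the converted model's spec.
    feature = mlmodel.get_spec().description.input[index]
    assert feature.type.multiArrayType.dataType == _DTYPE_TO_PROTO[expected_type_str]
    if expected_name is not None:
        assert feature.name == expected_name

def assert_output_dtype(mlmodel, expected_type_str, expected_name=None, index=0):
    # Same check, but on the output feature description.
    feature = mlmodel.get_spec().description.output[index]
    assert feature.type.multiArrayType.dataType == _DTYPE_TO_PROTO[expected_type_str]
    if expected_name is not None:
        assert feature.name == expected_name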
Example #2
 def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op,
                                          float32_input_model_relu_ops,
                                          int32_input_model):
     """
     Same test as test_fp16_input_dtype, but with Float32 precision
     """
     mlmodel = ct.convert(
         float32_input_model_add_op,
         inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)
     """
     Although no FP16ComputePrecision is applied, the float16 input propagates through the network
     """
     mlmodel = ct.convert(
         float32_input_model_relu_ops,
         inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     assert_ops_in_mil_program(mlmodel,
                               expected_op_list=["cast", "relu", "relu"])
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
Example #3
    def test_multi_output_model(self, float32_two_output_model):
        # check that error is raised when only 1 output provided
        with pytest.raises(
                ValueError,
                match="Number of outputs provided, 1, "
                "do not match the number of outputs detected in the model, 2"):
            ct.convert(float32_two_output_model,
                       inputs=[ct.TensorType(shape=(10, 20))],
                       outputs=[ct.TensorType()],
                       minimum_deployment_target=ct.target.macOS12)

        # set 1 output to float16 and the other to float32
        mlmodel = ct.convert(
            float32_two_output_model,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            outputs=[
                ct.TensorType(name="out1", dtype=np.float16),
                ct.TensorType(name="out2", dtype=np.float32)
            ],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_cast_ops_count(mlmodel, expected_count=1)
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel,
                            expected_type_str="fp16",
                            expected_name="out1",
                            index=0)
        assert_output_dtype(mlmodel,
                            expected_type_str="fp32",
                            expected_name="out2",
                            index=1)
        verify_prediction(mlmodel)
Example #4
    def test_tf2keras_shared_range_dim(use_symbol):
        # Test examples in https://coremltools.readme.io/docs/flexible-inputs
        import tensorflow as tf

        input_dim = 3
        # None denotes seq_len dimension
        x1 = tf.keras.Input(shape=(None,input_dim), name="seq1")
        x2 = tf.keras.Input(shape=(None,input_dim), name="seq2")
        y = x1 + x2
        keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y])

        # One RangeDim shared by two inputs
        if use_symbol:
            seq_len_dim = ct.RangeDim(symbol='seq_len')
        else:
            # symbol is optional
            seq_len_dim = ct.RangeDim()
        seq1_input = ct.TensorType(name="seq1", shape=(1, seq_len_dim, input_dim))
        seq2_input = ct.TensorType(name="seq2", shape=(1, seq_len_dim, input_dim))
        mlmodel = ct.convert(keras_model,
                inputs=[seq1_input, seq2_input])

        batch = 1
        seq_len = 5
        test_input_x1 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
        test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
        expected_val = keras_model([test_input_x1, test_input_x2])
        if ct.utils._is_macos():
            results = mlmodel.predict({
                "seq1": test_input_x1,
                "seq2": test_input_x2})
            np.testing.assert_allclose(results["Identity"], expected_val,
                rtol=1e-4, atol=1e-3)
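
Since seq_len is an unbounded RangeDim shared by both inputs, the converted model should also accept other sequence lengths at prediction time. A hedged follow-up check, reusing the names above:

        # Hedged follow-up check: a different seq_len should also be accepted,
        # because the shared RangeDim was left unbounded.
        alt_seq_len = 7
        alt_x1 = np.random.rand(batch, alt_seq_len, input_dim).astype(np.float32)
        alt_x2 = np.random.rand(batch, alt_seq_len, input_dim).astype(np.float32)
        if ct.utils._is_macos():
            alt_results = mlmodel.predict({"seq1": alt_x1, "seq2": alt_x2})
            np.testing.assert_allclose(alt_results["Identity"],
                keras_model([alt_x1, alt_x2]), rtol=1e-4, atol=1e-3)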
Example #5
    def test_tf2keras_optional_input():
        # Test examples in https://coremltools.readme.io/docs/flexible-inputs
        import tensorflow as tf

        input_dim = 3
        # None denotes seq_len dimension
        x1 = tf.keras.Input(shape=(None, input_dim), name="optional_input")
        x2 = tf.keras.Input(shape=(None, input_dim), name="required_input")
        y = x1 + x2
        keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y])

        seq_len_dim = ct.RangeDim()
        default_value = np.ones((1, 2, input_dim)).astype(np.float32)
        optional_input = ct.TensorType(
            name="optional_input",
            shape=(1, seq_len_dim, input_dim),
            default_value=default_value,
        )
        required_input = ct.TensorType(
            name="required_input",
            shape=(1, seq_len_dim, input_dim),
        )
        mlmodel = ct.convert(keras_model,
                             inputs=[optional_input, required_input])

        batch = 1
        seq_len = 2
        test_input_x2 = np.random.rand(batch, seq_len,
                                       input_dim).astype(np.float32)
        expected_val = keras_model([default_value, test_input_x2])
        if ct.utils._is_macos():
            results = mlmodel.predict({"required_input": test_input_x2})
            np.testing.assert_allclose(results["Identity"],
                                       expected_val,
                                       rtol=1e-4)
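
When the optional input is actually supplied at prediction time, it overrides the default value baked into the model. A hedged continuation of the test above:

        # Hedged continuation: feed the optional input explicitly instead of
        # relying on its default value.
        test_input_x1 = np.random.rand(batch, seq_len, input_dim).astype(np.float32)
        expected_val = keras_model([test_input_x1, test_input_x2])
        if ct.utils._is_macos():
            results = mlmodel.predict({"optional_input": test_input_x1,
                                       "required_input": test_input_x2})
            np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4)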
Example #6
 def test_mil_default_value_to_proto(self):
     program_input_spec = [
         ct.TensorType(name="x",
                       shape=[1],
                       default_value=np.array([1.0]).astype(np.float32)),
         ct.TensorType(name="y", shape=[1])
     ]
     mlmodel = ct.convert(self.basic_network,
                          convert_to="mlprogram",
                          inputs=program_input_spec)
     input_spec = mlmodel.get_spec().description.input
     assert len(input_spec) == 2, "2 inputs expected, got {} instead".format(len(input_spec))
     assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
     assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", \
         "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
     assert input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue") == "floatDefaultValue", \
         "Expected floatDefaultValue, got {} instead".format(
             input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue"))
     assert input_spec[0].type.multiArrayType.floatDefaultValue == 1.0
Example #7
    def test_fp16_input_dtype(self, float32_input_model_add_op,
                              float32_input_model_relu_ops, int32_input_model):
        """
        Test that providing fp16 input dtype works with macOS13.
        """
        mlmodel = ct.convert(
            float32_input_model_add_op,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(
            float32_input_model_relu_ops,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13)
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["relu", "relu", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(
            int32_input_model,
            inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp32")
        verify_prediction(mlmodel)
Example #8
    def test_convert_from_frozen_graph_file(tmpdir):
        # create the model to convert
        import tensorflow as tf

        # write a toy frozen graph
        # Note that we usually need to run freeze_graph() on the tf.Graph();
        # we skip it here since this toy model does not contain any variables
        with tf.Graph().as_default() as graph:
            x = tf.compat.v1.placeholder(tf.float32, shape=(1, 2, 3), name="input")
            y = tf.nn.relu(x, name="output")

        save_path = str(tmpdir)
        tf.io.write_graph(graph, save_path, "frozen_graph.pb", as_text=False)

        # Create a test sample
        # -0.5 to have some negative values
        test_input = np.random.rand(1, 2, 3) - 0.5
        with tf.compat.v1.Session(graph=graph) as sess:
            expected_val = sess.run(y, feed_dict={x: test_input})

        # The input `.pb` file is in the frozen graph format, usually
        # generated by TensorFlow's utility function `freeze_graph()`
        pb_path = os.path.join(save_path, "frozen_graph.pb")

        # 3 ways to specify inputs:
        # (1) Fully specify inputs
        mlmodel = ct.convert(
            pb_path,
            # We specify inputs with name matching the placeholder name.
            inputs=[ct.TensorType(name="input", shape=(1, 2, 3))],
            outputs=["output"],
        )

        # (2) Specify input TensorType without name (when there's only one
        # input)
        mlmodel = ct.convert(
            pb_path,
            # TensorType name is optional when there's only one input.
            inputs=[ct.TensorType(shape=(1, 2, 3))],
            outputs=["output"],
        )

        # (3) Don't specify inputs at all. `inputs` is optional for TF; when
        # it is not specified, convert() infers the inputs from the graph's
        # Placeholder nodes.
        mlmodel = ct.convert(pb_path, outputs=["output"])

        results = mlmodel.predict({"input": test_input})
        np.testing.assert_allclose(results["output"], expected_val)
        mlmodel_path = os.path.join(save_path, "model.mlmodel")
        # Save the converted model
        mlmodel.save(mlmodel_path)

        results = mlmodel.predict({"input": test_input})
        np.testing.assert_allclose(results["output"], expected_val)
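
The saved .mlmodel can also be loaded back into a fresh MLModel object and queried again, in the same way Example #22 reloads its saved package; a hedged extra check:

        # Hedged extra check: reload the saved model from disk and predict again.
        reloaded = ct.models.MLModel(mlmodel_path)
        results = reloaded.predict({"input": test_input})
        np.testing.assert_allclose(results["output"], expected_val)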
Example #9
 def test_linear_model(self, linear_model):
     # this will test the fuse_matmul_weight_bias pass, when the inputs are of type float16
     mlmodel = ct.convert(linear_model,
                          inputs=[ct.TensorType(dtype=np.float16)],
                          outputs=[ct.TensorType(dtype=np.float16)],
                          minimum_deployment_target=ct.target.macOS13,
                          )
     assert_input_dtype(mlmodel, expected_type_str="fp16")
     assert_output_dtype(mlmodel, expected_type_str="fp16")
     assert_ops_in_mil_program(mlmodel, ["linear", "relu"])
     verify_prediction(mlmodel)
Example #10
    def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops):
        # test output type
        mlmodel = ct.convert(int32_input_model,
                             minimum_deployment_target=ct.target.macOS12)
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_output_dtype(mlmodel, expected_type_str="int32")

        # test that error is raised when an output of unknown name is provided
        with pytest.raises(Exception):
            # output name does not exist in the model
            mlmodel = ct.convert(int32_input_model,
                                 outputs=["z"],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that error is raised when two outputs are provided without names
        with pytest.raises(ValueError, match=", does not have names"):
            mlmodel = ct.convert(int32_input_model,
                                 outputs=[ct.TensorType(dtype=np.float32), ct.TensorType(dtype=np.float32)],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that an error is raised when shape is provided for the output
        with pytest.raises(ValueError):
            mlmodel = ct.convert(int32_input_model,
                                 outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))],
                                 minimum_deployment_target=ct.target.macOS12)

        # test that the output dtype provided by the user is applied during conversion
        mlmodel = ct.convert(int32_input_model,
                             outputs=[ct.TensorType(dtype=np.float32)],
                             minimum_deployment_target=ct.target.macOS12)
        assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"])

        # test that output dtype of float16 is rejected when deployment target is low
        with pytest.raises(TypeError,
                           match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"
                           ):
            ct.convert(float32_input_model_relu_ops,
                       outputs=[ct.TensorType(dtype=np.float16)],
                       minimum_deployment_target=ct.target.macOS12,
                       )

        # test that output type float16 is applied correctly
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "relu"])

        # test that input and output types float16 are applied correctly
        mlmodel = ct.convert(float32_input_model_relu_ops,
                             inputs=[ct.TensorType(dtype=np.float16)],
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_input_dtype(mlmodel, expected_type_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output")
        assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "relu"])
        verify_prediction(mlmodel)
Example #11
    def test_invalid_input_dtype(self, int32_input_model):
        # error should be raised if a dtype is provided by the user that is not supported
        with pytest.raises(TypeError,
                           match="is unsupported for inputs/outputs of the model"
                           ):
            mlmodel = ct.convert(int32_input_model,
                                 inputs=[ct.TensorType(dtype=np.int16)],
                                 minimum_deployment_target=ct.target.macOS12)

        with pytest.raises(TypeError,
                           match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"
                           ):
            mlmodel = ct.convert(int32_input_model,
                                 inputs=[ct.TensorType(dtype=np.float16)],
                                 minimum_deployment_target=ct.target.macOS12)
Example #12
def export_to_coreml(args, exp_args):
    if args.model == 'PortraitNet':
        import model_mobilenetv2_seg_small as modellib
        netmodel = modellib.MobileNetV2(n_class=2,
                                        useUpsample=exp_args.useUpsample,
                                        useDeconvGroup=exp_args.useDeconvGroup,
                                        addEdge=exp_args.addEdge,
                                        channelRatio=1.0,
                                        minChannel=16,
                                        weightInit=True,
                                        video=exp_args.video).cuda()
        #netmodel = netmodel.eval()
        weights_path = '/'.join((exp_args.model_root, 'model_best.pth.tar'))
        coreml_path = '/'.join((exp_args.model_root, 'PortaitNet.mlmodel'))
        state_dict = torch.load(weights_path)
        netmodel.load_state_dict(state_dict['state_dict'])
        batch_size = 1  #args.batchsize
        channels = 3
        input_width = exp_args.input_width
        input_height = exp_args.input_height
        dummy_input = torch.randn(batch_size,
                                  channels,
                                  input_width,
                                  input_height,
                                  device='cuda')
        netmodel = netmodel.eval()
        traced_model = torch.jit.trace(netmodel, dummy_input)
        mlmodel = ct.convert(traced_model,
                             inputs=[
                                 ct.TensorType(name="captured_input",
                                               shape=dummy_input.shape)
                             ])
        mlmodel.save(coreml_path)
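
Since PortraitNet consumes RGB frames, the same traced model could also be converted with an ImageType input (scale and per-channel bias, as in Example #13). A hedged sketch; the scale and bias values below are placeholders, not the network's real preprocessing constants:

        # Hedged alternative: expose the input as an image rather than a raw tensor.
        # scale/bias here are illustrative placeholders only.
        image_input = ct.ImageType(name="captured_input",
                                   shape=dummy_input.shape,
                                   scale=1.0 / 255.0,
                                   bias=[0.0, 0.0, 0.0])
        mlmodel_image = ct.convert(traced_model, inputs=[image_input])
        mlmodel_image.save('/'.join((exp_args.model_root, 'PortraitNet_image.mlmodel')))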
Example #13
File: coreml.py Project: xxxgp/deepvac
    def process(self, cast_output_file=None):
        try:
            import coremltools
        except ImportError:
            LOG.logE(
                "You need to install the coremltools package if you want to convert a PyTorch model to a CoreML model, e.g. pip install --upgrade coremltools"
            )
            return

        output_coreml_file = self.config.coreml_model_dir
        if cast_output_file:
            output_coreml_file = '{}/coreml__{}.mlmodel'.format(
                self.config.output_dir, cast_output_file)
            self.config.coreml_model_dir = output_coreml_file

        LOG.logI(
            "config.coreml_model_dir found, save coreml model to {}...".format(
                self.config.coreml_model_dir))
        model = self.config.script_model_dir
        if self.config.script_model_dir is None:
            model = self.config.trace_model_dir
        #input mode
        if self.config.coreml_input_type == 'image':
            input = coremltools.ImageType(
                name="input",
                shape=tuple(self.config.sample.shape),
                scale=self.config.coreml_scale,
                color_layout=self.config.coreml_color_layout,
                bias=[
                    self.config.coreml_blue_bias,
                    self.config.coreml_green_bias, self.config.coreml_red_bias
                ])
        else:
            input = coremltools.TensorType(name='input',
                                           shape=tuple(
                                               self.config.sample.shape))
        # convert
        coreml_model = coremltools.convert(
            model=model,
            inputs=[input],
            classifier_config=self.config.coreml_classfier_config,
            minimum_deployment_target=self.config.coreml_minimum_deployment_target)

        # Set feature descriptions (these show up as comments in Xcode)
        coreml_model.input_description["input"] = "Deepvac Model Input"
        coreml_model.output_description["classLabel"] = "Most likely image category"

        # Set model author name
        coreml_model.author = 'DeepVAC'

        # Set the license of the model
        coreml_model.license = "Deepvac License"
        coreml_model.short_description = "Powered by DeepVAC"

        # Set a version for the model
        coreml_model.version = self.config.coreml_version if self.config.coreml_version else "1.0"
        # Save the CoreML model
        coreml_model.save(output_coreml_file)
Example #14
    def test_convert_torch_vision_mobilenet_v2(tmpdir):
        import torch
        import torchvision
        """
        In this example, we'll instantiate a PyTorch classification model and convert
        it to Core ML.
        """
        """
        Here we instantiate our model. In a real use case this would be your trained
        model.
        """
        model = torchvision.models.mobilenet_v2()
        """
        The next thing we need to do is generate TorchScript for the model. The easiest
        way to do this is by tracing it.
        """
        """
        It's important that a model be in evaluation mode (not training mode) when it's
        traced. This makes sure things like dropout are disabled.
        """
        model.eval()
        """
        Tracing takes an example input and traces its flow through the model. Here we
        are creating an example image input.

        The rank and shape of the tensor will depend on your model use case. If your
        model expects a fixed size input, use that size here. If it can accept a
        variety of input sizes, it's generally best to keep the example input small to
        shorten how long it takes to run a forward pass of your model. In all cases,
        the rank of the tensor must be fixed.
        """
        example_input = torch.rand(1, 3, 256, 256)
        """
        Now we actually trace the model. This will produce the TorchScript that the
        CoreML converter needs.
        """
        traced_model = torch.jit.trace(model, example_input)
        """
        Now with a TorchScript representation of the model, we can call the CoreML
        converter. The converter also needs a description of the input to the model,
        where we can give it a convenient name.
        """
        mlmodel = ct.convert(
            traced_model,
            inputs=[ct.TensorType(name="input", shape=example_input.shape)],
        )
        """
        Now with a conversion complete, we can save the MLModel and run inference.
        """
        save_path = os.path.join(str(tmpdir), "mobilenet_v2.mlmodel")
        mlmodel.save(save_path)
        """
        Running predict() is only supported on macOS.
        """
        if ct.utils._is_macos():
            results = mlmodel.predict({"input": example_input.numpy()})
            expected = model(example_input)
            np.testing.assert_allclose(list(results.values())[0],
                                       expected.detach().numpy(),
                                       rtol=1e-2)
Example #15
    def test_multiarray_input_enumerated(self, convert_to):
        if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0):
            return

        example_input = torch.rand(1, 3, 50, 50) * 100
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        input_shape = ct.EnumeratedShapes(
            shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]],
            default=[1, 3, 67, 67])
        model = ct.convert(traced_model,
                           inputs=[ct.TensorType(shape=input_shape)],
                           convert_to=convert_to)

        spec = model.get_spec()
        array_type = spec.description.input[0].type.multiArrayType
        assert list(array_type.shape) == [1, 3, 67, 67]
        assert list(array_type.enumeratedShapes.shapes[0].shape) == [1, 3, 67, 67]
        assert len(array_type.enumeratedShapes.shapes) == 3
        _assert_torch_coreml_output_shapes(model, spec, traced_model,
                                           example_input)
Example #16
    def test_torch_outofbound_range_dim(use_symbol):
        import torch

        num_tokens = 3
        embedding_size = 5

        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()
                self.embedding = torch.nn.Embedding(num_tokens, embedding_size)

            def forward(self, x):
                return self.embedding(x)

        model = TestModule()
        model.eval()

        example_input = torch.randint(high=num_tokens, size=(3,),
                dtype=torch.int64)
        traced_model = torch.jit.trace(model, example_input)

        if use_symbol:
            seq_len_dim = ct.RangeDim(symbol='len', lower_bound=3,
                    upper_bound=5)
        else:
            # symbol is optional
            seq_len_dim = ct.RangeDim(lower_bound=3, upper_bound=5)
        seq_input = ct.TensorType(name="input", shape=(seq_len_dim,),
                dtype=np.int64)
        mlmodel = ct.convert(
            traced_model,
            inputs=[seq_input],
        )

        if ct.utils._is_macos():
            result = mlmodel.predict(
                {"input": example_input.detach().numpy().astype(np.float32)}
            )

            # Verify outputs
            expected = model(example_input)
            name = list(result.keys())[0]
            np.testing.assert_allclose(result[name], expected.detach().numpy())

            # seq_len below/above lower_bound/upper_bound
            with pytest.raises(RuntimeError,
                    match=r"not compatible with the model\'s feature"):
                example_input2 = torch.randint(high=num_tokens, size=(99,),
                        dtype=torch.int64)
                result = mlmodel.predict(
                    {"input": example_input2.detach().numpy().astype(np.float32)}
                )

            with pytest.raises(RuntimeError,
                    match=r"not compatible with the model\'s feature"):
                example_input2 = torch.randint(high=num_tokens, size=(2,),
                        dtype=torch.int64)
                result = mlmodel.predict(
                    {"input": example_input2.detach().numpy().astype(np.float32)}
                )
Example #17
 def test_input_dtype_default(self, int32_input_model):
     # if dtype is not provided, it defaults to float32
     mlmodel = ct.convert(int32_input_model,
                          inputs=[ct.TensorType(shape=(10, 20))],
                          minimum_deployment_target=ct.target.macOS12)
     assert_input_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)
Example #18
    def test_convert_torch_traced_model(tmpdir):
        import torch
        from torch import nn

        class Network(nn.Module):
            def __init__(self):
                super(Network, self).__init__()
                self.hidden = nn.Linear(100, 10)
                self.output = nn.Linear(10, 2)
                self.sigmoid = nn.Sigmoid()
                self.softmax = nn.Softmax(dim=1)

            def forward(self, x):
                x = self.hidden(x)
                x = self.sigmoid(x)
                x = self.output(x)
                x = self.softmax(x)
                return x

        torch_model = Network()
        torch_model.eval()
        example_input = torch.rand(1, 100)
        traced_model = torch.jit.trace(torch_model, example_input)
        model = ct.convert(
            traced_model,
            inputs=[ct.TensorType(name="input", shape=example_input.shape)],
            convert_to='mil')
        assert isinstance(model, ct.converters.mil.Program)
Example #19
    def test_convert_tf_keras_applications_model(dtype):
        import tensorflow as tf

        tf_keras_model = tf.keras.applications.MobileNet(weights="imagenet",
                                                         input_shape=(224, 224,
                                                                      3))

        # inputs / outputs are optional; we can get them from the tf.keras model.
        # This can be extremely helpful when we want to extract sub-graphs.
        input_name = tf_keras_model.inputs[0].name.split(":")[0]
        # note that `convert()` requires the tf.Graph's output names rather than
        # the tf.keras.Model's outputs; to access those, we can do the following
        output_name = tf_keras_model.outputs[0].name.split(":")[0]
        tf_graph_output_name = output_name.split("/")[-1]

        if dtype == 'default':
            dtype = None
        elif dtype == 'mil_type':
            dtype = ct.converters.mil.types.fp32
        else:
            dtype = np.float32

        mlmodel = ct.convert(
            tf_keras_model,
            inputs=[ct.TensorType(shape=(1, 224, 224, 3), dtype=dtype)],
            outputs=[tf_graph_output_name],
        )
        mlmodel.save("./mobilenet.mlmodel")
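
A hedged follow-up, assuming the Core ML input and output features keep the names extracted above (input_name and tf_graph_output_name): run a prediction on macOS and compare against the Keras model.

        # Hedged follow-up check; feature names are assumed to match the ones
        # extracted from the tf.keras model above.
        if ct.utils._is_macos():
            test_img = np.random.rand(1, 224, 224, 3).astype(np.float32)
            results = mlmodel.predict({input_name: test_img})
            expected = tf_keras_model.predict(test_img)
            np.testing.assert_allclose(results[tf_graph_output_name], expected,
                                       rtol=1e-2, atol=1e-2)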
Example #20
 def test_grayscale_fp16_output_image(self, rank4_grayscale_input_model):
     mlmodel = ct.convert(
         rank4_grayscale_input_model,
         inputs=[ct.TensorType(name="input", shape=(1, 1, 10, 20))],
         outputs=[
             ct.ImageType(name="output_image",
                          color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
         ],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     sample_input = np.random.randint(low=0, high=200,
                                      size=(1, 1, 10, 20)).astype(np.float32)
     model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image']
     assert isinstance(model_output_pil_image, Image.Image)
     assert model_output_pil_image.mode == "F"
     model_output_as_numpy = np.array(model_output_pil_image)
     reference_output = rank4_grayscale_input_model(
         torch.from_numpy(sample_input)).detach().numpy()
     reference_output = np.squeeze(reference_output)
     np.testing.assert_allclose(reference_output,
                                model_output_as_numpy,
                                rtol=1e-2,
                                atol=1e-2)
Example #21
    def test_fully_dynamic_inputs():
        """
        All dims of the inputs are dynamic, and we write to a slice of one of
        the inputs.
        """
        import torch

        class Model(torch.nn.Module):
            def __init__(self, index):
                super(Model, self).__init__()
                self.index = index

            def forward(self, x, y):
                x[:, int(self.index.item())] = 0.0
                y = y.unsqueeze(0)
                return y, x

        model = Model(torch.tensor(3))
        scripted_model = torch.jit.script(model)

        mlmodel = ct.convert(
            scripted_model,
            inputs=[
                ct.TensorType("x", shape=(ct.RangeDim(), ct.RangeDim())),
                ct.TensorType("y", shape=(ct.RangeDim(), ct.RangeDim()))
            ],
        )

        # running predict() is only supported on macOS
        if ct.utils._is_macos():
            x, y = torch.rand(2, 4), torch.rand(1, 2)
            torch_res = model(x, y)
            results = mlmodel.predict({
                "x": x.cpu().detach().numpy(),
                "y": y.cpu().detach().numpy()
            })
            np.testing.assert_allclose(torch_res[0], results['y.3'])
            np.testing.assert_allclose(torch_res[1], results['x'])

            x, y = torch.rand(1, 6), torch.rand(2, 3)
            torch_res = model(x, y)
            results = mlmodel.predict({
                "x": x.cpu().detach().numpy(),
                "y": y.cpu().detach().numpy()
            })
            np.testing.assert_allclose(torch_res[0], results['y.3'])
            np.testing.assert_allclose(torch_res[1], results['x'])
Example #22
def test_mlmodel_demo(tmpdir):
    NUM_TOKENS = 3
    EMBEDDING_SIZE = 5

    class TestModule(torch.nn.Module):
        def __init__(self):
            super(TestModule, self).__init__()
            self.embedding = torch.nn.Embedding(NUM_TOKENS, EMBEDDING_SIZE)

        def forward(self, x):
            return self.embedding(x)

    model = TestModule()
    model.eval()

    example_input = torch.randint(high=NUM_TOKENS,
                                  size=(2, ),
                                  dtype=torch.int64)
    traced_model = torch.jit.trace(model, example_input)
    mlmodel = ct.convert(traced_model,
                         source='pytorch',
                         convert_to='mlprogram',
                         inputs=[
                             ct.TensorType(
                                 name="input",
                                 shape=example_input.shape,
                                 dtype=example_input.numpy().dtype,
                             )
                         ],
                         compute_precision=ct.precision.FLOAT32)
    # `ct.convert` returns a `coremltools.models.MLModel` for both the
    # `mlprogram` and `neuralnetwork` backends
    assert isinstance(mlmodel, MLModel)

    # mlpackage_path is a model package
    mlpackage_path = os.path.join(str(tmpdir), 'mymodel.mlpackage')
    mlmodel.save(mlpackage_path)

    # Read back the saved bundle and compile
    mlmodel2 = MLModel(mlpackage_path)

    if not _IS_MACOS or _macos_version() < (12, 0):
        # Can not get predictions unless on macOS 12 or higher.
        shutil.rmtree(mlpackage_path)
        return

    result = mlmodel2.predict(
        {"input": example_input.cpu().detach().numpy().astype(np.float32)},
        useCPUOnly=True,
    )

    # Verify outputs
    expected = model(example_input)
    name = list(result.keys())[0]
    np.testing.assert_allclose(result[name], expected.cpu().detach().numpy())

    # Cleanup package
    shutil.rmtree(mlpackage_path)
Example #23
    def test_torch_optional_input():
        import torch

        num_tokens = 3
        embedding_size = 5

        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()
                self.embedding = torch.nn.Embedding(num_tokens, embedding_size)

            def forward(self, x, y):
                return self.embedding(x) + y

        model = TestModule()
        model.eval()

        example_input = [
            torch.randint(high=num_tokens, size=(2, ), dtype=torch.int64),
            torch.rand(1),
        ]
        traced_model = torch.jit.trace(model, example_input)

        required_input = ct.TensorType(name="required_input",
                                       shape=(ct.RangeDim(), ),
                                       dtype=np.int64)
        default_value = np.array([3]).astype(np.float32)
        optional_input = ct.TensorType(name="optional_input",
                                       shape=(1, ),
                                       default_value=default_value)
        mlmodel = ct.convert(
            traced_model,
            inputs=[required_input, optional_input],
        )

        if ct.utils._is_macos():
            result = mlmodel.predict({
                "required_input":
                example_input[0].detach().numpy().astype(np.float32)
            })

            # Verify outputs
            torch_default_value = torch.tensor([3])
            expected = model(example_input[0].detach(), torch_default_value)
            name = list(result.keys())[0]
            np.testing.assert_allclose(result[name], expected.detach().numpy())
Example #24
 def test_input_dtype_user_provided(self, int32_input_model):
     # test that provided dtype in the api overrides the input dtype in the TF model
     mlmodel = ct.convert(int32_input_model,
                          inputs=[ct.TensorType(dtype=np.float32)],
                          minimum_deployment_target=ct.target.macOS12)
     assert_input_dtype(mlmodel, expected_type_str="fp32")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)
Example #25
def create_core_ml_for_tensorflow_preprocessing():
    input = ct.TensorType(shape=(1, 224, 224, 3))
    keras_model = VGGFace(model="senet50",
                          pooling="avg",
                          include_top=False,
                          input_shape=(224, 224, 3))
    coreml_model = ct.convert(keras_model, source='tensorflow', inputs=[input])
    write_metadata(coreml_model)
    coreml_model.save("Face-without-preprocessing.mlmodel")
Example #26
 def test_input_shape_missing_error(self, float32_input_model_add_op):
     with pytest.raises(
             ValueError,
             match=
             "'shape' must be provided in the 'inputs' argument for pytorch conversion"
     ):
         mlmodel = ct.convert(float32_input_model_add_op,
                              inputs=[ct.TensorType(dtype=np.int32)],
                              minimum_deployment_target=ct.target.macOS12)
Example #27
 def test_input_dtype_user_provided(self, float32_input_model_add_op):
     # test that provided dtype in the api is applied
     mlmodel = ct.convert(
         float32_input_model_add_op,
         inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)],
         minimum_deployment_target=ct.target.macOS12)
     assert_input_dtype(mlmodel, expected_type_str="int32")
     assert_output_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)
Example #28
def get_input(name: str, inp: dict):
    shape = get_shape(inp)
    if 'type' in inp and inp['type'].upper() == 'IMAGE':
        return ct.ImageType(name=name,
                            shape=shape,
                            bias=inp.get('bias'),
                            scale=inp.get('scale'))
    else:
        return ct.TensorType(name=name, shape=shape)
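
A hedged usage example for get_input; the dict layout and the expectations of the get_shape helper are assumptions, since only this snippet of the config format is shown:

# Hedged usage example; the spec dicts below are hypothetical.
image_spec = {'type': 'image', 'shape': [1, 3, 224, 224],
              'scale': 1 / 255.0, 'bias': [0.0, 0.0, 0.0]}
tensor_spec = {'shape': [1, 128]}

inputs = [get_input('image', image_spec), get_input('features', tensor_spec)]
# `inputs` can then be passed straight to ct.convert(model, inputs=inputs).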
Example #29
 def test_unsupported_input_dtype_in_torch_model(self, int64_input_model):
     # test that no error is raised when no dtype is provided by the user,
     # and the Torch model's input dtype is not supported.
     # In this case, it will be mapped to the default dtype which is float32
     mlmodel = ct.convert(int64_input_model,
                          inputs=[ct.TensorType(shape=(10, 20))],
                          minimum_deployment_target=ct.target.macOS12)
     assert_input_dtype(mlmodel, expected_type_str="fp32")
     verify_prediction(mlmodel)
Example #30
    def test_torch_enumerated_shapes():
        import torch

        in_channels = 3
        out_channels = 2
        kernel_size = 3

        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()
                self.conv = torch.nn.Conv2d(in_channels, out_channels,
                                            kernel_size)

            def forward(self, x):
                return self.conv(x)

        model = TestModule()
        model.eval()

        example_input = torch.randn(1, 3, 28, 28)
        traced_model = torch.jit.trace(model, example_input)

        shapes = [(1, 3, 28, 28), (1, 3, 56, 56)]
        enumerated_shapes = ct.EnumeratedShapes(shapes=shapes)
        tensor_input = ct.TensorType(name="input", shape=enumerated_shapes)

        mlmodel = ct.convert(
            traced_model,
            inputs=[tensor_input],
        )

        if ct.utils._is_macos():
            result = mlmodel.predict(
                {"input": example_input.detach().numpy().astype(np.float32)},
                useCPUOnly=True,
            )

            # Verify outputs
            expected = model(example_input)
            name = list(result.keys())[0]
            np.testing.assert_allclose(result[name],
                                       expected.detach().numpy(),
                                       rtol=1e-3,
                                       atol=1e-4)

            # Test (1, 3, 56, 56) shape (can't verify numerical parity with Torch
            # which doesn't support enumerated shape)
            test_input_x = np.random.rand(*shapes[1]).astype(np.float32)
            results = mlmodel.predict({"input": test_input_x})

            # Test with a wrong shape
            with pytest.raises(
                    RuntimeError,
                    match=r"not compatible with the model\'s feature"):
                test_input_x = np.random.rand(1, 3, 29, 29).astype(np.float32)
                results = mlmodel.predict({"input": test_input_x})
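
As Example #15 shows, the enumerated shapes are also recorded in the converted model's spec and can be inspected directly; a hedged addition to the test above:

        # Hedged addition: the enumerated shapes show up in the model spec.
        spec = mlmodel.get_spec()
        enumerated = spec.description.input[0].type.multiArrayType.enumeratedShapes
        shape_list = [list(s.shape) for s in enumerated.shapes]
        assert len(shape_list) == 2
        assert [1, 3, 28, 28] in shape_list and [1, 3, 56, 56] in shape_list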