Example #1
    def test_color_input(self, rank4_input_model, rank3_input_model):
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[
                ct.ImageType(shape=(1, 3, 10, 20),
                             color_layout=ct.colorlayout.RGB)
            ],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(
            mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program,
                                expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        with pytest.raises(ValueError, match="must have rank 4"):
            mlmodel = ct.convert(
                rank3_input_model,
                inputs=[
                    ct.ImageType(shape=(1, 10, 20),
                                 color_layout=ct.colorlayout.RGB)
                ],
                minimum_deployment_target=ct.target.macOS12,
            )
Example #2
    def process(self, cast_output_file=None):
        try:
            import coremltools
        except ImportError:
            LOG.logE(
                "You need to install coremltools package if you want to convert PyTorch to CoreML model. E.g. pip install --upgrade coremltools"
            )
            return

        output_coreml_file = self.config.coreml_model_dir
        if cast_output_file:
            output_coreml_file = '{}/coreml__{}.mlmodel'.format(
                self.config.output_dir, cast_output_file)
            self.config.coreml_model_dir = output_coreml_file

        LOG.logI(
            "config.coreml_model_dir found, save coreml model to {}...".format(
                self.config.coreml_model_dir))
        model = self.config.script_model_dir
        if self.config.script_model_dir is None:
            model = self.config.trace_model_dir
        # input mode
        if self.config.coreml_input_type == 'image':
            input = coremltools.ImageType(
                name="input",
                shape=tuple(self.config.sample.shape),
                scale=self.config.coreml_scale,
                color_layout=self.config.coreml_color_layout,
                bias=[
                    self.config.coreml_blue_bias,
                    self.config.coreml_green_bias, self.config.coreml_red_bias
                ])
        else:
            input = coremltools.TensorType(name='input',
                                           shape=tuple(
                                               self.config.sample.shape))
        # convert
        coreml_model = coremltools.convert(
            model=model,
            inputs=[input],
            classifier_config=self.config.coreml_classfier_config,
            minimum_deployment_target=self.config.coreml_minimum_deployment_target)

        # Set feature descriptions (these show up as comments in XCode)
        coreml_model.input_description["input"] = "Deepvac Model Input"
        coreml_model.output_description[
            "classLabel"] = "Most likely image category"

        # Set model author name
        coreml_model.author = 'DeepVAC'

        # Set the license of the model
        coreml_model.license = "Deepvac License"
        coreml_model.short_description = "Powered by DeepVAC"

        # Set a version for the model
        coreml_model.version = self.config.coreml_version if self.config.coreml_version else "1.0"
        # Save the CoreML model
        coreml_model.save(output_coreml_file)
Example #3
    def test_image_input_enumerated(self, convert_to):
        if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0):
            return

        example_input = torch.rand(1, 3, 50, 50) * 255
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        input_shape = ct.EnumeratedShapes(
            shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]],
            default=[1, 3, 67, 67])
        model = ct.convert(traced_model,
                           inputs=[ct.ImageType(shape=input_shape)],
                           convert_to=convert_to)

        spec = model.get_spec()
        assert spec.description.input[0].type.imageType.width == 67
        assert spec.description.input[0].type.imageType.height == 67
        assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 3
        assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].width == 25
        assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].height == 25
        _assert_torch_coreml_output_shapes(model,
                                           spec,
                                           traced_model,
                                           example_input,
                                           is_image_input=True)
Example #4
 def test_grayscale_fp16_output_image(self, rank4_grayscale_input_model):
     mlmodel = ct.convert(
         rank4_grayscale_input_model,
         inputs=[ct.TensorType(name="input", shape=(1, 1, 10, 20))],
         outputs=[
             ct.ImageType(name="output_image",
                          color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
         ],
         minimum_deployment_target=ct.target.macOS13,
         compute_precision=ct.precision.FLOAT32,
     )
     sample_input = np.random.randint(low=0, high=200,
                                      size=(1, 1, 10, 20)).astype(np.float32)
     model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image']
     assert isinstance(model_output_pil_image, Image.Image)
     assert model_output_pil_image.mode == "F"
     model_output_as_numpy = np.array(model_output_pil_image)
     reference_output = rank4_grayscale_input_model(
         torch.from_numpy(sample_input)).detach().numpy()
     reference_output = np.squeeze(reference_output)
     np.testing.assert_allclose(reference_output,
                                model_output_as_numpy,
                                rtol=1e-2,
                                atol=1e-2)
Example #5
    def test_program_bgr(self):
        """
        Input graph:

        main(x: ImageType(color_layout="BGR", channel_first=True)) {
            y1 = relu(x)
            y2 = relu(x)
            output = add(y1, y2)
        } [output]

        Output graph:

        main(x: ImageType(channel_first=True)) {
            y1 = relu(x)
            y2 = relu(x)
            output = add(y1, y2)
        } [output]
        """
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))])
        def prog(x):
            y1 = mb.relu(x=x)
            y2 = mb.relu(x=x)
            z = mb.add(x=y1, y=y2)
            return z

        prog.main_input_types = (ct.ImageType(name='x',
                                              shape=[1, 3, 20, 20],
                                              color_layout="BGR",
                                              channel_first=True), )

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "mil_backend::insert_image_preprocessing_ops")
        assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"]
        assert get_op_types_in_program(prog) == ["relu", "relu", "add"]
Example #6
def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    try:
        check_requirements(('coremltools',))
        import coremltools as ct

        LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
        f = file.with_suffix('.mlmodel')

        ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
        bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
        if bits < 32:
            if platform.system() == 'Darwin':  # quantization only supported on macOS
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
                    ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
            else:
                print(f'{prefix} quantization only supported on macOS, skipping...')
        ct_model.save(f)

        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        return ct_model, f
    except Exception as e:
        LOGGER.info(f'\n{prefix} export failure: {e}')
        return None, None
Example #7
def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    ct_model = None
    try:
        check_requirements(('coremltools', ))
        import coremltools as ct

        print(
            f'\n{prefix} starting export with coremltools {ct.__version__}...')
        f = file.with_suffix('.mlmodel')

        model.train()  # CoreML exports should be placed in model.train() mode
        ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
        ct_model = ct.convert(ts,
                              inputs=[
                                  ct.ImageType('image',
                                               shape=im.shape,
                                               scale=1 / 255.0,
                                               bias=[0, 0, 0])
                              ])
        ct_model.save(f)

        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'\n{prefix} export failure: {e}')

    return ct_model
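A hedged usage sketch for the exporter above; the dummy input size, the weights path, and an already-loaded YOLOv5 model are assumptions, not values from the source:

# Hedged usage sketch; `model` is assumed to be an already-loaded YOLOv5 nn.Module.
from pathlib import Path
import torch

im = torch.zeros(1, 3, 640, 640)  # dummy BCHW input batch
ct_model = export_coreml(model, im, Path('yolov5s.pt'))  # writes yolov5s.mlmodel on success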
Example #8
    def test_mil_enumerated_image(self):
        enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)])
        input_shape = [
            ct.ImageType(name="x",
                         shape=ct.EnumeratedShapes(shapes=enumerated_shapes))
        ]
        mlmodel = ct.convert(self.basic_network,
                             source="milinternal",
                             convert_to="mlprogram",
                             inputs=input_shape)
        input_spec = mlmodel.get_spec().description.input
        assert len(input_spec) == 1, \
            "1 input expected, got {} instead".format(len(input_spec))
        assert input_spec[0].name == "x", \
            "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
        assert input_spec[0].type.WhichOneof("Type") == "imageType", \
            "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
        assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", \
            "Expected enumeratedSizes in SizeFlexibility"

        spec_H = input_spec[0].type.imageType.height
        spec_W = input_spec[0].type.imageType.width
        assert spec_H == 10 and spec_W == 10, \
            "expected [H, W] == [10, 10], got [{}, {}] instead".format(spec_H, spec_W)

        spec_enumerated_shapes = set()
        for enumerated in input_spec[0].type.imageType.enumeratedSizes.sizes:
            spec_enumerated_shapes.add((1, 3, enumerated.height, enumerated.width))
        assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
Example #9
def get_input(name: str, inp: dict):
    shape = get_shape(inp)
    if 'type' in inp and inp['type'].upper() == 'IMAGE':
        return ct.ImageType(name=name,
                            shape=shape,
                            bias=inp.get('bias'),
                            scale=inp.get('scale'))
    else:
        return ct.TensorType(name=name, shape=shape)
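A hedged usage sketch for get_input; the spec dicts below are hypothetical, and get_shape is assumed to resolve the 'shape' entry elsewhere in the same module:

# Hypothetical spec dicts; get_shape() comes from the surrounding module.
image_spec = {'type': 'image', 'shape': [1, 3, 224, 224], 'scale': 1 / 255.0, 'bias': [0, 0, 0]}
image_input = get_input('image', image_spec)  # -> ct.ImageType with scale/bias attached
tensor_input = get_input('x', {'shape': [1, 10]})  # no 'type' key, falls back to ct.TensorType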
Example #10
    def test_color_output(self, rank4_input_model, float32_input_model_add_op):
        # check that an error is raised if the output shape is not of form (1, 3, H, W)
        with pytest.raises(ValueError,
                           match="must have rank 4. Instead it has rank 2"):
            ct.convert(float32_input_model_add_op,
                       inputs=[ct.TensorType(shape=(10, 20))],
                       outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                       minimum_deployment_target=ct.target.macOS13)

        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[
                ct.ImageType(shape=(1, 3, 10, 20),
                             color_layout=ct.colorlayout.BGR)
            ],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
            minimum_deployment_target=ct.target.macOS13,
        )
        assert_ops_in_mil_program(mlmodel,
                                  expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(
            mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        assert_spec_output_image_type(
            mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program,
                                expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # check neural network conversion
        mlmodel = ct.convert(
            rank4_input_model,
            inputs=[
                ct.ImageType(shape=(1, 3, 10, 20),
                             color_layout=ct.colorlayout.RGB)
            ],
            outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
        )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(
            mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_spec_output_image_type(
            mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        verify_prediction(mlmodel)
Example #11
    def test_grayscale_output(self, rank4_grayscale_input_model, rank4_grayscale_input_model_with_channel_first_output):
        # check that an error is raised if the output shape is not of form (1, 1, H, W)
        with pytest.raises(ValueError, match="Shape of the Grayscale image output,"):
            mlmodel = ct.convert(rank4_grayscale_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 )

        with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
                                 outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                                 minimum_deployment_target=ct.target.macOS12,
                                 )

        mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        verify_prediction(mlmodel)

        mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_cast_ops_count(mlmodel, expected_count=0)
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)

        mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
        verify_prediction(mlmodel)
Example #12
def apply(model,
          outfile,
          *,
          input_w=129,
          input_h=97,
          minimum_deployment_target='iOS14'):
    assert coremltools is not None
    image_size_warning(model.base_net.stride, input_w, input_h)

    # configure: inplace-ops are not supported
    openpifpaf.network.heads.CompositeField3.inplace_ops = False
    openpifpaf.network.heads.CompositeField4.inplace_ops = False

    dummy_input = torch.randn(1, 3, input_h, input_w)
    with torch.no_grad():
        traced_model = torch.jit.trace(model, dummy_input)

    coreml_model = coremltools.convert(
        traced_model,
        inputs=[
            coremltools.ImageType(name='image',
                                  shape=dummy_input.shape,
                                  bias=[-1.0, -1.0, -1.0],
                                  scale=1.0 / 127.0)
        ],
        # classifier_config = ct.ClassifierConfig(class_labels)
        minimum_deployment_target=getattr(coremltools.target,
                                          minimum_deployment_target),
    )

    # pylint: disable=protected-access
    coremltools.models.utils.rename_feature(
        coreml_model._spec, coreml_model._spec.description.output[0].name,
        'cif_head')
    coremltools.models.utils.rename_feature(
        coreml_model._spec, coreml_model._spec.description.output[1].name,
        'caf_head')

    # Meta
    coreml_model.input_description['image'] = 'Input image to be classified'
    coreml_model.output_description['cif_head'] = 'Composite Intensity Field'
    coreml_model.output_description['caf_head'] = 'Composite Association Field'
    coreml_model.author = 'Kreiss, Bertoni, Alahi: Composite Fields for Human Pose Estimation'
    coreml_model.license = 'Please see https://github.com/openpifpaf/openpifpaf'
    coreml_model.short_description = 'Composite Fields for Human Pose Estimation'
    coreml_model.version = openpifpaf.__version__

    coreml_model.save(outfile)

    # test predict
    image_input = PIL.Image.new('RGB', (input_w, input_h))
    test_predict = coreml_model.predict({'image': image_input})
    print('!!!!!!!!', test_predict)
Example #13
    def test_color_output(self, rank4_input_model, rank4_input_model_with_channel_first_output):
        # check that an error is raised if the output shape is not of form (1, 3, H, W)
        with pytest.raises(ValueError, match="Shape of the RGB/BGR image output,"):
            mlmodel = ct.convert(rank4_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                                 outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        mlmodel = ct.convert(rank4_input_model_with_channel_first_output,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
                             outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        # check neural network conversion
        mlmodel = ct.convert(rank4_input_model_with_channel_first_output,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
                             outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)],
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB)
        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR)
        verify_prediction(mlmodel)
Example #14
 def test_torch_image_enumerated_shapes():
     import torch
     import torchvision
     torch_model = torchvision.models.mobilenet_v2().features
     torch_model.eval()
     example_input = torch.rand(1, 3, 256, 256)
     traced_model = torch.jit.trace(torch_model, example_input)
     input_shapes = ct.EnumeratedShapes(shapes=[(1, 3, 256, 256), (1, 3, 224, 224)])
     image_input = ct.ImageType(shape=input_shapes,
                                bias=[-1, -1, -1], scale=1 / 127)
     model = ct.convert(traced_model, inputs=[image_input])
     assert model is not None
     spec = model.get_spec()
     assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2
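Assuming the conversion above succeeded, a minimal sketch of exercising both enumerated sizes at predict time (prediction requires macOS); spec and model come from the test above, and the input name is read from the spec rather than assumed:

# Hedged sketch: run one prediction per enumerated size.
from PIL import Image

input_name = spec.description.input[0].name
for h, w in [(256, 256), (224, 224)]:
    _ = model.predict({input_name: Image.new('RGB', (w, h))})  # PIL size is (W, H)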
Example #15
 def test_tf2_image_enumerated_shapes():
     import tensorflow as tf
     keras_model = tf.keras.applications.MobileNetV2(
         input_shape=(None, None, 3,),
         classes=1000,
         include_top=False,
     )
     input_shapes = ct.EnumeratedShapes(shapes=[(1, 192, 192, 3), (1, 224, 224, 3)])
     image_input = ct.ImageType(shape=input_shapes,
                                bias=[-1,-1,-1], scale=1/127)
     model = ct.convert(keras_model, inputs=[image_input])
     assert model is not None
     spec = model.get_spec()
     assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2
Example #16
    def test_scale_bias_types(self, scale_type, bias_type):
        """
        Input graph:

        main(x: ImageType(color_layout="RGB", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) {
            y1 = relu(x)
            y2 = relu(x)
            output = add(y1, y2)
        } [output]

        Output graph:

        main(x: ImageType(channel_first=True)) {
            y = mul(x, scale)
            y_bias = add(y, bias)
            y1 = relu(y_bias)
            y2 = relu(y_bias)
            output = add(y1, y2)
        } [output]
        """
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))])
        def prog(x):
            y1 = mb.relu(x=x)
            y2 = mb.relu(x=x)
            z = mb.add(x=y1, y=y2)
            return z

        prog.main_input_types = (ct.ImageType(name='x',
                                              shape=[1, 3, 20, 20],
                                              scale=scale_type(2.0),
                                              bias=np.array([1, 2, 3]).astype(bias_type),
                                              color_layout="RGB",
                                              channel_first=True), )

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "mil_backend::insert_image_preprocessing_ops")
        assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"]
        assert get_op_types_in_program(prog) == [
            "mul", "add", "relu", "relu", "add"
        ]
        scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0]
        assert scale_op.y.dtype() == prog.functions["main"].inputs["x"].dtype()
        add_op = prog.find_ops(op_type="add", exactly_one=False)[0]
        assert add_op.y.dtype() == prog.functions["main"].inputs["x"].dtype()
Example #17
def convert_to_core_ml():
    image_input = ct.ImageType(shape=(1, 224, 224, 3),
                               bias=[-1, -1, -1],
                               scale=1 / 127)
    classifier_config = ct.ClassifierConfig(saved_labels_path)

    ml_model = ct.convert(
        model,
        inputs=[image_input],
        classifier_config=classifier_config,
    )

    ml_model.save(saved_model_path + "/model.mlmodel")
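The bias=[-1, -1, -1], scale=1/127 pair above is the usual MobileNet-style normalization: Core ML applies the documented preprocessing y = scale * x + bias to image inputs before the first layer, which maps 8-bit pixels from [0, 255] to roughly [-1, 1]. A quick arithmetic check:

import numpy as np

# Preprocessing Core ML applies to image inputs: y = scale * x + bias.
x = np.array([0.0, 127.0, 255.0])  # darkest pixel, midpoint, brightest pixel
y = (1 / 127) * x - 1.0            # -> [-1.0, 0.0, ~1.008]
print(y)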
Example #18
    def test_program_grayscale_with_scale_bias(self):
        """
        Input graph:

        main(x: ImageType(scale=2.0, bias=2.0, color_layout="G", channel_first=True)) {
            y1 = relu(x)
            y2 = relu(x)
            output = add(y1, y2)
        } [output]

        Output graph:

        main(x: ImageType(channel_first=True)) {
            y_scaled = mul(x, 2)
            y = add(y_scaled, 2)
            y1 = relu(y)
            y2 = relu(y)
            output = add(y1, y2)
        } [output]
        """
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))])
        def prog(x):
            y1 = mb.relu(x=x)
            y2 = mb.relu(x=x)
            z = mb.add(x=y1, y=y2)
            return z

        prog.main_input_types = (ct.ImageType(name='x',
                                              shape=[1, 1, 20, 20],
                                              scale=2.0,
                                              bias=2.0,
                                              color_layout="G",
                                              channel_first=True), )

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "mil_backend::insert_image_preprocessing_ops")
        assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"]
        assert get_op_types_in_program(prog) == [
            "mul", "add", "relu", "relu", "add"
        ]
        scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0]
        assert scale_op.y.val == 2.0
        add_op = prog.find_ops(op_type="add", exactly_one=False)[0]
        assert add_op.y.val == 2.0
Example #19
def create_core_ml_model_file(keras_model, filename):
    if COREML_FILE_FORMAT not in filename:
        filename += COREML_FILE_FORMAT

    start = time.time()

    image_input = ct.ImageType(shape=(1, 224, 224, 3),
                               bias=[-91.4953, -103.8827, -131.0912],
                               color_layout="BGR")
    coreml_model = ct.convert(keras_model, inputs=[image_input])
    write_metadata(coreml_model)
    coreml_model.save(filename)

    end = time.time()
    print(f"{filename} took {end - start} seconds to create.")
Example #20
def test():
    torch_model = torchvision.models.mobilenet_v2(pretrained=True)
    # Set the model in evaluation mode
    # torch_model.eval()

    # after test, will get 'size mismatch' error message with size 256x256
    example_input = torch.rand(1, 3, 224, 224)
    # traced_model = torch.jit.trace(torch_model, example_input)
    fastpunct = FastPunct()
    traced_model = fastpunct.model
    traced_model.save('aaa.h5')
    # Convert to Core ML using the Unified Conversion API
    model = ct.convert(
        traced_model,
        source='pytorch',
        inputs=[ct.ImageType(name="input_1", shape=example_input.shape)],  # provide only if step 2 was performed
    )

    # Save model
    model.save("MobileNetV2.mlmodel")
Example #21
    def test_image_input_rangedim(self, convert_to):
        example_input = torch.rand(1, 3, 50, 50) * 255
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45),
                                      ct.RangeDim(25, 100, default=45)))
        model = ct.convert(traced_model,
                           inputs=[ct.ImageType(shape=input_shape)],
                           convert_to=convert_to)

        spec = model.get_spec()
        assert spec.description.input[0].type.imageType.width == 45
        assert spec.description.input[0].type.imageType.height == 45
        size_range = spec.description.input[0].type.imageType.imageSizeRange
        assert size_range.widthRange.lowerBound == 25
        assert size_range.widthRange.upperBound == 100
        _assert_torch_coreml_output_shapes(model,
                                           spec,
                                           traced_model,
                                           example_input,
                                           is_image_input=True)
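As a hedged follow-up, any height/width inside the declared RangeDim bounds should be accepted at predict time (macOS only); spec and model come from the test above:

# Hedged sketch: predict at a non-default size within the [25, 100] range.
from PIL import Image

input_name = spec.description.input[0].name
_ = model.predict({input_name: Image.new('RGB', (60, 30))})  # PIL size is (W, H)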
Example #22
 def test_grayscale_input_image(self, rank4_grayscale_input_model):
     mlmodel = ct.convert(
         rank4_grayscale_input_model,
         inputs=[
             ct.ImageType(name="input_image",
                          shape=(1, 1, 10, 20),
                          color_layout=ct.colorlayout.GRAYSCALE)
         ],
         outputs=[ct.TensorType(name="output")],
         minimum_deployment_target=ct.target.macOS13,
     )
     sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20))
     img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L')
     model_output = mlmodel.predict({"input_image": img_input})['output']
     reference_output = rank4_grayscale_input_model(
         torch.from_numpy(sample_input.astype(np.float32))).detach().numpy()
     np.testing.assert_allclose(reference_output,
                                model_output,
                                rtol=1e-2,
                                atol=1e-2)
Example #23
    def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model):
        with pytest.raises(ValueError, match="must have rank 4"):
            mlmodel = ct.convert(rank3_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        # invalid shape
        with pytest.raises(ValueError):
            mlmodel = ct.convert(rank4_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                                 minimum_deployment_target=ct.target.macOS13,
                                 )

        mlmodel = ct.convert(rank4_grayscale_input_model,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32")
        verify_prediction(mlmodel)

        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(rank4_grayscale_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                                 minimum_deployment_target=ct.target.macOS12,
                                 )

        # test that grayscale_16 raises error when used with neural network
        with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"):
            mlmodel = ct.convert(rank4_grayscale_input_model,
                                 inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                                 )

        mlmodel = ct.convert(rank4_grayscale_input_model,
                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
                             outputs=[ct.TensorType(dtype=np.float16)],
                             minimum_deployment_target=ct.target.macOS13,
                             )
        assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"])
        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
        assert_output_dtype(mlmodel, expected_type_str="fp16")
        verify_prediction(mlmodel)
Example #24
def convertToCoremlSpec(torchScript, sampleInput):
    """
    Converts a torchscript to a coreml model
    """
    try:
        print(f"Starting CoreML conversion with coremltools {ct.__version__}")
        nnSpec = ct.convert(
            torchScript,
            inputs=[
                ct.ImageType(
                    name="image",
                    shape=sampleInput.shape,
                    scale=1 / 255.0,
                    bias=[0, 0, 0],
                )
            ],
        ).get_spec()

        print(f"CoreML conversion success")
    except Exception as e:
        print(f"CoreML conversion failure: {e}")
        return
    return nnSpec
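Since the helper returns a raw spec rather than a model, a caller presumably wraps it back into an MLModel before saving; a minimal sketch, with ts and sampleInput standing in for a traced model and an example input tensor:

# Hedged usage sketch; ts and sampleInput are assumptions (a traced model and example tensor).
spec = convertToCoremlSpec(ts, sampleInput)
if spec is not None:
    ct.models.MLModel(spec).save('model.mlmodel')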
Example #25
    def test_mil_ranged_image_with_default(self):
        input_shape = [
            ct.ImageType(name="x",
                         shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))
        ]
        mlmodel = ct.convert(self.basic_network,
                             source="milinternal",
                             convert_to="mlprogram",
                             inputs=input_shape)
        input_spec = mlmodel.get_spec().description.input
        assert len(input_spec) == 1, \
            "1 input expected, got {} instead".format(len(input_spec))
        assert input_spec[0].name == "x", \
            "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
        assert input_spec[0].type.WhichOneof("Type") == "imageType", \
            "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
        assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "imageSizeRange", \
            "Expected imageSizeRange in SizeFlexibility"

        spec_H = input_spec[0].type.imageType.height
        spec_W = input_spec[0].type.imageType.width
        assert spec_H == 10 and spec_W == 20, \
            "expected [H, W] == [10, 20], got [{}, {}] instead".format(spec_H, spec_W)

        spec_H_range = [
            input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound,
            input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound
        ]
        spec_W_range = [
            input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound,
            input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound
        ]
        assert spec_H_range == [10, 10], "Ranged height mismatch"
        assert spec_W_range == [10, 30], "Ranged width mismatch"
Example #26
def model_conversion():
    print('Running model conversion')
    opt, model = load_model_with_options()
    if opt.eval:
        model.eval()

    # Read image and create input tensor
    input_tensor = create_normalized_tensor(opt.input_img)

    # Model conversion
    model.netG.eval()
    # Step 1 - create a traced model
    traced_model = torch.jit.trace(model.netG, input_tensor)

    if opt.core_input == 'image':
        ssmodel = ct.convert(traced_model,
                             inputs=[
                                 ct.ImageType(name="input1",
                                              shape=input_tensor.shape,
                                              bias=[-1, -1, -1],
                                              scale=1 / 127.0)
                             ])
        ssmodel.save(opt.model_path)
        # Test model
        input_img = Image.open(opt.input_img)
        res = ssmodel.predict({"input1": input_img})
    elif opt.core_input == 'tensor':
        ssmodel = ct.convert(
            traced_model,
            inputs=[ct.TensorType(name="input1", shape=input_tensor.shape)])
        ssmodel.save(opt.model_path)
        # Test model
        res = ssmodel.predict({"input1": input_tensor.numpy()})

    if opt.res_img != '':
        write_clmodel_res(opt.res_img, res['226'])
Example #27
    def test_grayscale_fp16_input_image(self, rank4_grayscale_input_model):
        mlmodel = ct.convert(
            rank4_grayscale_input_model,
            inputs=[
                ct.ImageType(name="input_image",
                             shape=(1, 1, 10, 20),
                             color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
            ],
            outputs=[ct.TensorType(name="output")],
            minimum_deployment_target=ct.target.macOS13,
        )

        # incorrect way to do prediction
        with pytest.raises(
                TypeError,
                match="must be of type PIL.Image.Image with mode=='F'",
        ):
            sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20))
            img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L')
            mlmodel.predict({"input_image": img_input})

        # correct way to do prediction
        sample_input = np.random.rand(1, 1, 10, 20)  # in between [0, 1]
        img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.float32), 'F')
        model_output = mlmodel.predict({"input_image": img_input})['output']
        reference_output = rank4_grayscale_input_model(
            torch.from_numpy(sample_input.astype(np.float32))).detach().numpy()
        np.testing.assert_allclose(reference_output,
                                   model_output,
                                   rtol=1e-2,
                                   atol=1e-2)
Example #28
        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' %
              ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts,
                           inputs=[
                               ct.ImageType(name='image',
                                            shape=img.shape,
                                            scale=1 / 255.0,
                                            bias=[0, 0, 0])
                           ])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print(
        '\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.'
        % (time.time() - t))
Example #29
        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        model.fuse()  # only for ONNX
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'])

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.')
Example #30
def run(
        weights='./yolov5s.pt',  # weights path
        img_size=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx', 'coreml'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLOv5 Detect() inplace=True
        train=False,  # model.train() mode
        optimize=False,  # TorchScript: optimize for mobile
        dynamic=False,  # ONNX: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset_version=12,  # ONNX: opset version
):
    t = time.time()
    include = [x.lower() for x in include]
    img_size *= 2 if len(img_size) == 1 else 1  # expand

    # Load PyTorch model
    device = select_device(device)
    assert not (device.type == 'cpu' and half), \
        '--half only compatible with GPU export, i.e. use --device 0'
    model = attempt_load(weights, map_location=device)  # load FP32 model
    labels = model.names

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    img_size = [check_img_size(x, gs) for x in img_size]  # verify img_size are gs-multiples
    img = torch.zeros(batch_size, 3, *img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model
    if half:
        img, model = img.half(), model.half()  # to FP16
    model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        elif isinstance(m, Detect):
            m.inplace = inplace
            m.onnx_dynamic = dynamic
            # m.forward = m.forward_export  # assign forward (optional)

    for _ in range(2):
        y = model(img)  # dry runs
    print(
        f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)"
    )

    # TorchScript export -----------------------------------------------------------------------------------------------
    if 'torchscript' in include or 'coreml' in include:
        prefix = colorstr('TorchScript:')
        try:
            print(
                f'\n{prefix} starting export with torch {torch.__version__}...'
            )
            f = weights.replace('.pt', '.torchscript.pt')  # filename
            ts = torch.jit.trace(model, img, strict=False)
            (optimize_for_mobile(ts) if optimize else ts).save(f)
            print(
                f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)'
            )
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # ONNX export ------------------------------------------------------------------------------------------------------
    if 'onnx' in include:
        prefix = colorstr('ONNX:')
        try:
            import onnx

            print(f'{prefix} starting export with onnx {onnx.__version__}...')
            f = weights.replace('.pt', '.onnx')  # filename
            torch.onnx.export(
                model,
                img,
                f,
                verbose=False,
                opset_version=opset_version,
                training=torch.onnx.TrainingMode.TRAINING
                if train else torch.onnx.TrainingMode.EVAL,
                do_constant_folding=not train,
                input_names=['images'],
                output_names=['output'],
                dynamic_axes={
                    'images': {
                        0: 'batch',
                        2: 'height',
                        3: 'width'
                    },  # shape(1,3,640,640)
                    'output': {
                        0: 'batch',
                        1: 'anchors'
                    }  # shape(1,25200,85)
                } if dynamic else None)

            # Checks
            model_onnx = onnx.load(f)  # load onnx model
            onnx.checker.check_model(model_onnx)  # check onnx model
            # print(onnx.helper.printable_graph(model_onnx.graph))  # print

            # Simplify
            if simplify:
                try:
                    check_requirements(['onnx-simplifier'])
                    import onnxsim

                    print(
                        f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...'
                    )
                    model_onnx, check = onnxsim.simplify(
                        model_onnx,
                        dynamic_input_shape=dynamic,
                        input_shapes={'images': list(img.shape)}
                        if dynamic else None)
                    assert check, 'assert check failed'
                    onnx.save(model_onnx, f)
                except Exception as e:
                    print(f'{prefix} simplifier failure: {e}')
            print(
                f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)'
            )
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # CoreML export ----------------------------------------------------------------------------------------------------
    if 'coreml' in include:
        prefix = colorstr('CoreML:')
        try:
            import coremltools as ct

            print(
                f'{prefix} starting export with coremltools {ct.__version__}...'
            )
            assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`'
            model = ct.convert(ts,
                               inputs=[
                                   ct.ImageType('image',
                                                shape=img.shape,
                                                scale=1 / 255.0,
                                                bias=[0, 0, 0])
                               ])
            f = weights.replace('.pt', '.mlmodel')  # filename
            model.save(f)
            print(
                f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)'
            )
        except Exception as e:
            print(f'{prefix} export failure: {e}')

    # Finish
    print(
        f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.'
    )