Example #1
    def test_dead_layer_remove_branch(self):
        convergence_tolerance = 1e-8

        input_features = [("input", datatypes.Array(*(2,)))]
        output_features = [("out", None)]

        builder = neural_network.NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        # add condition to break from the loop, if convergence criterion is met
        builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance)
        branch_layer = builder.add_branch("branch_layer", "cond")
        builder_ifbranch = neural_network.NeuralNetworkBuilder(
            nn_spec=branch_layer.branch.ifBranch
        )
        builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out")
        builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out")
        builder_elsebranch = neural_network.NeuralNetworkBuilder(
            nn_spec=branch_layer.branch.elseBranch
        )
        builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out")
        builder_elsebranch.add_activation(
            "linear2", "LINEAR", "linear1_out", "relu2_out"
        )
        builder.add_squeeze("out", "input", "out", squeeze_all=True)

        mlmodel = MLModel(builder.spec)
        data = np.random.rand(2,)
        data_dict = {"input": data}
        if _IS_MACOS:
            before_pass_out = mlmodel.predict(data_dict, useCPUOnly=True)["out"]
            if DEBUG:
                print(
                    "\n mlmodel description before remove disconnected layers pass: \n"
                )
                print_network_spec(builder.spec, style="coding")
            remove_disconnected_layers(builder.spec)
            if DEBUG:
                print(
                    "\n mlmodel description after remove disconnected layers pass: \n"
                )
                print_network_spec(builder.spec, style="coding")
            mlmodel = MLModel(builder.spec)
            after_pass_out = mlmodel.predict(data_dict, useCPUOnly=True)["out"]

            np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
            np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
Example #2
def _keras_2_mlmodel_image():
    """
    Converts a Keras h5 model into ML Model for image data and saves it on 
    disk.

    NOTE: Image configuration must be specified from Explora. 

    NOTE: Currently, only categorical cross entropy loss is supported.
    """
    model = get_keras_model()
    ios_config = state.state["ios_config"]
    class_labels = ios_config["class_labels"]
    mlmodel = keras_converter.convert(model, input_names=['image'],
                                output_names=['output'],
                                class_labels=class_labels,
                                predicted_feature_name='label')
    mlmodel.save(state.state["mlmodel_path"])

    image_config = ios_config["image_config"]
    spec = coremltools.utils.load_spec(state.state["mlmodel_path"])
    builder = coremltools.models.neural_network.NeuralNetworkBuilder(spec=spec)

    dims = image_config["dims"]
    spec.description.input[0].type.imageType.width = dims[0]
    spec.description.input[0].type.imageType.height = dims[1]

    cs = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(image_config["color_space"])
    spec.description.input[0].type.imageType.colorSpace = cs

    trainable_layer_names = [layer.name for layer in model.layers if layer.get_weights()]
    builder.make_updatable(trainable_layer_names)

    builder.set_categorical_cross_entropy_loss(name='loss', input='output')

    if isinstance(model.optimizer, SGD):
        params = SgdParams(
            lr=K.eval(model.optimizer.lr), 
            batch=state.state["hyperparams"]["batch_size"],
        )
        builder.set_sgd_optimizer(params)
    elif isinstance(model.optimizer, Adam):
        params = AdamParams(
            lr=K.eval(model.optimizer.lr),
            batch=state.state["hyperparams"]["batch_size"],
            beta1=K.eval(model.optimizer.beta_1),
            beta2=K.eval(model.optimizer.beta_2),
            eps=model.optimizer.epsilon,
        )
        builder.set_adam_optimizer(params)
    else:
        raise Exception("iOS optimizer must be SGD or Adam!")

    builder.set_epochs(UNLIMITED_EPOCHS)
    builder.set_shuffle(state.state["hyperparams"]["shuffle"])  

    mlmodel_updatable = MLModel(spec)
    mlmodel_updatable.save(state.state["mlmodel_path"])

    K.clear_session()
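A minimal follow-up sketch, not part of the original function (it assumes the same `state` module and the standard coremltools `load_spec` utility), that confirms the exported model was actually flagged as updatable:

# Hypothetical check: reload the saved spec and verify the updatable flag.
spec = coremltools.utils.load_spec(state.state["mlmodel_path"])
assert spec.isUpdatable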
Example #3
    def test_nn_builder_with_training_features(self):

        input_features = [('input', datatypes.Array(3))]
        output_features = [('output', None)]
        training_features = [('input', datatypes.Array(3)),
                             ('target', datatypes.Double)]

        builder = NeuralNetworkBuilder(input_features,
                                       output_features,
                                       disable_rank5_shape_mapping=True,
                                       training_features=training_features)

        W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
        W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(name='ip1',
                                  W=W1,
                                  b=None,
                                  input_channels=3,
                                  output_channels=3,
                                  has_bias=False,
                                  input_name='input',
                                  output_name='hidden')
        builder.add_inner_product(name='ip2',
                                  W=W2,
                                  b=None,
                                  input_channels=3,
                                  output_channels=3,
                                  has_bias=False,
                                  input_name='hidden',
                                  output_name='output')

        builder.make_updatable(['ip1', 'ip2'])  # or a dict for weightParams

        builder.set_mean_squared_error_loss(name='mse',
                                            input='output',
                                            target='target')

        builder.set_adam_optimizer(
            AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
        builder.set_epochs(20, allowed_set=[10, 20, 30])

        builder.set_training_input([('input', datatypes.Array(3)),
                                    ('target', 'Double')])

        model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
        print(model_path)
        save_spec(builder.spec, model_path)

        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertEqual(spec.description.trainingInput[0].name, 'input')
        self.assertEqual(
            spec.description.trainingInput[0].type.WhichOneof('Type'),
            'multiArrayType')
        self.assertEqual(spec.description.trainingInput[1].name, 'target')
        self.assertEqual(
            spec.description.trainingInput[1].type.WhichOneof('Type'),
            'doubleType')
Example #4
 def test_rename_input(self):
     rename_feature(self.spec, 'feature_1', 'renamed_feature', rename_inputs=True)
     model = MLModel(self.spec)
     preds = model.predict({'renamed_feature': 1.0, 'feature_2': 1.0})
     self.assertIsNotNone(preds)
     self.assertEqual(preds['output'], 3.1)
     # reset the spec for next run
     rename_feature(self.spec, 'renamed_feature', 'feature_1', rename_inputs=True)
Example #5
 def test_rename_input(self):
     rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True)
     model = MLModel(self.spec)
     preds = model.predict({"renamed_feature": 1.0, "feature_2": 1.0})
     self.assertIsNotNone(preds)
     self.assertEqual(preds["output"], 3.1)
     # reset the spec for next run
     rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True)
Example #6
 def test_rename_output_bad(self):
     rename_feature(
         self.spec, "blah", "bad_name", rename_inputs=False, rename_outputs=True
     )
     model = MLModel(self.spec)
     preds = model.predict({"feature_1": 1.0, "feature_2": 1.0})
     self.assertIsNotNone(preds)
     self.assertEqual(preds["output"], 3.1)
Example #7
    def test_rename_input_bad(self):
        utils.rename_feature(self.spec, "blah", "bad_name", rename_inputs=True)
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        if utils._macos_version() >= (12, 0):
            preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0})
            assert preds is not None
            assert preds["output"] == 3.1

        # cleanup
        _remove_path(package.name)
Example #8
 def test_path_to_spec_to_mlmodel(self):
     """
     load a spec from disk, then convert it to mlmodel, and check that it works
     """
     spec = utils.load_spec(self.mlpackage_path)
     weights_dir = self.mlpackage_path + "/Data/" + _MLPACKAGE_AUTHOR_NAME + "/weights"
     mlmodel = MLModel(spec, weights_dir=weights_dir)
     self._test_mlmodel_correctness(mlmodel)
Example #9
    def test_save(self):
        model = MLModel(self.spec)

        # Verify "save" can be called twice and the saved
        # model can be loaded successfully each time
        for _ in range(0, 2):
            package = tempfile.TemporaryDirectory(suffix=".mlpackage")
            package.cleanup()

            model.save(package.name)
            loaded_model = MLModel(package.name)

            if utils._macos_version() >= (12, 0):
                preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0})
                assert preds is not None
                assert preds["output"] == 3.1

            _remove_path(package.name)
Example #10
def _get_model(spec):
    """
    Utility to get the model and the data.
    """
    from . import MLModel
    if isinstance(spec, MLModel):
        return spec
    else:
        return MLModel(spec)
Example #11
 def test_future_version(self):
     self.spec.specificationVersion = 10000
     model = MLModel(self.spec)
     # this model should exist, but throw an exception when we try to use predict because the engine doesn't support
     # this model version
     self.assertIsNotNone(model)
     with self.assertRaises(Exception):
         model.predict(1)
     self.spec.specificationVersion = 1
Example #12
def _convert_to_classifier(proto, classifier_config, skip_model_load=False):
    tmp_model = MLModel(proto, skip_model_load=skip_model_load)
    tmp_model = neural_network.utils.make_nn_classifier(
        tmp_model,
        classifier_config.class_labels,
        classifier_config.predicted_feature_name,
        classifier_config.predicted_probabilities_output,
    )
    return tmp_model.get_spec()
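A hedged usage sketch: `proto` stands for a converted neural network spec and the labels are placeholders, while `ClassifierConfig` is the standard coremltools configuration object whose attributes this helper reads.

# Hypothetical call: wrap a converted neural network spec as a classifier.
from coremltools import ClassifierConfig

config = ClassifierConfig(class_labels=["cat", "dog"], predicted_feature_name="label")
classifier_spec = _convert_to_classifier(proto, config, skip_model_load=True)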
Example #13
    def test_pipeline_rename(self):
        # Convert
        scikit_spec = converter.convert(self.scikit_model).get_spec()
        model = MLModel(scikit_spec)
        sample_data = self.scikit_data.data[0]

        # Rename
        rename_feature(scikit_spec, "input", "renamed_input")
        renamed_model = MLModel(scikit_spec)

        # Check the predictions
        if _is_macos() and _macos_version() >= (10, 13):
            out_dict = model.predict({"input": sample_data})
            out_dict_renamed = renamed_model.predict({"renamed_input": sample_data})
            self.assertAlmostEqual(list(out_dict.keys()), list(out_dict_renamed.keys()))
            self.assertAlmostEqual(
                list(out_dict.values()), list(out_dict_renamed.values())
            )
Example #14
    def load_model(cls, path: Union[str, pathlib.Path]):
        """
        Deserializes a VGSL model from a CoreML file.

        Args:
            path: CoreML file

        Returns:
            A TorchVGSLModel instance.

        Raises:
            KrakenInvalidModelException if the model data is invalid (not a
            string, protobuf file, or without appropriate metadata).
            FileNotFoundError if the path doesn't point to a file.
        """
        if isinstance(path, pathlib.Path):
            path = path.as_posix()
        try:
            mlmodel = MLModel(path)
        except TypeError as e:
            raise KrakenInvalidModelException(str(e))
        except DecodeError as e:
            raise KrakenInvalidModelException(
                'Failure parsing model protobuf: {}'.format(str(e)))
        if 'vgsl' not in mlmodel.user_defined_metadata:
            raise KrakenInvalidModelException('No VGSL spec in model metadata')
        vgsl_spec = mlmodel.user_defined_metadata['vgsl']
        nn = cls(vgsl_spec)

        def _deserialize_layers(name, layer):
            logger.debug(f'Deserializing layer {name} with type {type(layer)}')
            if type(layer) in (layers.MultiParamParallel,
                               layers.MultiParamSequential):
                for name, l in layer.named_children():
                    _deserialize_layers(name, l)
            else:
                layer.deserialize(name, mlmodel.get_spec())

        _deserialize_layers('', nn.nn)

        if 'codec' in mlmodel.user_defined_metadata:
            nn.add_codec(
                PytorchCodec(json.loads(
                    mlmodel.user_defined_metadata['codec'])))

        nn.user_metadata = {
            'accuracy': [],
            'seg_type': 'bbox',
            'one_channel_mode': '1',
            'model_type': None,
            'hyper_params': {}
        }  # type: dict[str, str]
        if 'kraken_meta' in mlmodel.user_defined_metadata:
            nn.user_metadata.update(
                json.loads(mlmodel.user_defined_metadata['kraken_meta']))
        return nn
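A short usage sketch, hedged: `TorchVGSLModel` is the class named in the docstring and the file path is a placeholder.

# Hypothetical usage: deserialize a kraken VGSL model from a Core ML file.
nn = TorchVGSLModel.load_model('model.mlmodel')
print(nn.user_metadata['seg_type'])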
Example #15
def _get_model(spec, compute_units=_ComputeUnit.ALL):
    """
    Utility to get the model and the data.
    """
    from . import MLModel

    if isinstance(spec, MLModel):
        return spec
    else:
        return MLModel(spec, compute_units=compute_units)
Example #16
 def test_rename_image_input(self):
     input_features = [("data", datatypes.Array(3, 1, 1))]
     output_features = [("out", datatypes.Array(3, 1, 1))]
     builder = NeuralNetworkBuilder(
         input_features, output_features, disable_rank5_shape_mapping=True
     )
     builder.add_activation("linear", "LINEAR", "data", "out")
     spec = builder.spec
     # make an image input
     mlmodel = make_image_input(MLModel(spec), "data", image_format="NCHW", scale=2.0)
     # rename the input
     spec = mlmodel.get_spec()
     rename_feature(spec, "data", "new_input_name")
     mlmodel = MLModel(spec)
     # test
     x = np.array([4, 5, 6], dtype=np.uint8).reshape(1, 1, 3)
     pil_img = PIL.Image.fromarray(x)
     out = mlmodel.predict({"new_input_name": pil_img}, useCPUOnly=True)['out']
     np.testing.assert_equal(out, np.array([8.0, 10.0, 12.0]).reshape(3, 1, 1))
Example #17
    def test_nn_partial_fp16_make_updatable_fail(self):
        nn_builder = self.create_base_builder()
        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(nn_builder.spec, model_path)
        mlmodel = MLModel(model_path)

        # fails since updatable models cannot get quantized to FP16
        with self.assertRaises(Exception):
            quantization_utils.quantize_weights(mlmodel, 16, "linear")
Example #18
 def test_rename_output_bad(self):
     rename_feature(self.spec,
                    'blah',
                    'bad_name',
                    rename_inputs=False,
                    rename_outputs=True)
     model = MLModel(self.spec)
     preds = model.predict({'feature_1': 1.0, 'feature_2': 1.0})
     self.assertIsNotNone(preds)
     self.assertEqual(preds['output'], 3.1)
Example #19
    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author,
                         "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license,
                         "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            "Test model")

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString,
                         "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.license, "Test license")
        # self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")
        self.assertEqual(model.output_description["output"], "This is output")
Example #20
    def __init__(self):
        vggish_model_file = VGGish()

        if _mac_ver() < (10, 14):
            # Use TensorFlow/Keras
            model_path = vggish_model_file.get_model_path(format='tensorflow')
            self.vggish_model = _keras.models.load_model(model_path)
        else:
            # Use Core ML
            model_path = vggish_model_file.get_model_path(format='coreml')
            self.vggish_model = MLModel(model_path)
Example #21
    def test_simple_branch(self):
        """ Test a simple if-else branch network
        """
        input_features = [("data", datatypes.Array(3)), ("cond", datatypes.Array(1))]
        output_features = [("output", None)]

        builder_top = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        layer = builder_top.add_branch("branch_layer", "cond")

        builder_ifbranch = NeuralNetworkBuilder(
            input_features=None,
            output_features=None,
            spec=None,
            nn_spec=layer.branch.ifBranch,
        )
        builder_ifbranch.add_elementwise(
            "mult_layer",
            input_names=["data"],
            output_name="output",
            mode="MULTIPLY",
            alpha=10,
        )
        builder_elsebranch = NeuralNetworkBuilder(
            input_features=None,
            output_features=None,
            spec=None,
            nn_spec=layer.branch.elseBranch,
        )
        builder_elsebranch.add_elementwise(
            "add_layer",
            input_names=["data"],
            output_name="output",
            mode="ADD",
            alpha=10,
        )
        coremltools.models.utils.save_spec(
            builder_top.spec, "/tmp/simple_branch.mlmodel"
        )
        mlmodel = MLModel(builder_top.spec)

        # True branch case
        input_dict = {
            "data": np.array(range(1, 4), dtype="float"),
            "cond": np.array([1], dtype="float"),
        }
        output_ref = {"output": input_dict["data"] * 10}
        self._test_model(mlmodel, input_dict, output_ref)

        # False branch case
        input_dict["cond"] = np.array([0], dtype="float")
        output_ref["output"] = input_dict["data"] + 10
        self._test_model(mlmodel, input_dict, output_ref)
Example #22
 def get_spec(self):
     """
     Return the Core ML spec
     """
     if _mac_ver() >= (10, 14):
         return self.vggish_model.get_spec()
     else:
         vggish_model_file = VGGish()
         coreml_model_path = vggish_model_file.get_model_path(
             format='coreml')
         return MLModel(coreml_model_path).get_spec()
Example #23
 def test_mlmodel_to_spec_to_mlmodel(self):
     """
     convert mlmodel to spec, and then back to mlmodel and verify that it works
     """
     spec = self.mlmodel.get_spec()
     # reload the model from the spec and verify it
     weights_dir = self.mlmodel.weights_dir
     mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir)
     self._test_mlmodel_correctness(mlmodel_from_spec)
     # check that the original model still works
     self._test_mlmodel_correctness(self.mlmodel)
     # check that an error is raised when MLModel is initialized without the weights
     with pytest.raises(
             Exception,
             match=
             "MLModel of type mlProgram cannot be loaded just from the model "
             "spec object. It also needs the path to the weights file. "
             "Please provide that as well, using the 'weights_dir' argument."
     ):
         MLModel(spec)
Example #24
def main():
    model = MLModel('pricing.mlmodel')
    with open('input.csv') as input:
        csv_reader = csv.reader(input, delimiter=',')
        with open('output.csv', mode='w') as output:
            csv_writer = csv.writer(output, delimiter=',')
            for row in csv_reader:
                sqft = int(row[0])
                price = int(model.predict({'sqft': sqft})['price'])
                csv_writer.writerow([sqft, price])
    print('Saved output.csv')
Example #25
 def test_save_spec_api_model_with_no_weights(self):
     """
     Save an mlprogram model with no weights, using the save_spec API and an
     empty weights directory. Reload the model from disk and verify it works.
     """
     spec = self.mlmodel_no_weights.get_spec()
     with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as model_path:
         with tempfile.TemporaryDirectory() as empty_weight_dir:
             utils.save_spec(spec, model_path, weights_dir=empty_weight_dir)
             model = MLModel(model_path)
             self._test_mlmodel_correctness(model)
Example #26
    def test_model_api(self):
        model = MLModel(self.spec)
        assert model is not None

        model.author = "Test author"
        assert model.author == "Test author"
        assert model.get_spec().description.metadata.author == "Test author"

        model.license = "Test license"
        assert model.license == "Test license"
        assert model.get_spec().description.metadata.license == "Test license"

        model.short_description = "Test model"
        assert model.short_description == "Test model"
        assert model.get_spec(
        ).description.metadata.shortDescription == "Test model"

        model.version = "1.3"
        assert model.version == "1.3"
        assert model.get_spec().description.metadata.versionString == "1.3"

        model.input_description["feature_1"] = "This is feature 1"
        assert model.input_description["feature_1"] == "This is feature 1"

        model.output_description["output"] = "This is output"
        assert model.output_description["output"] == "This is output"

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        assert model.author == "Test author"
        assert model.license == "Test license"
        assert model.short_description == "Test model"
        assert model.input_description["feature_1"] == "This is feature 1"
        assert model.output_description["output"] == "This is output"

        # cleanup
        _remove_path(package.name)
Example #27
    def test_nn_builder_with_training_features(self):
        input_features = [("input", datatypes.Array(3))]
        output_features = [("output", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)

        W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
        W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(
            name="ip1",
            W=W1,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="input",
            output_name="hidden",
        )
        builder.add_inner_product(
            name="ip2",
            W=W2,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="hidden",
            output_name="output",
        )

        builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams

        builder.set_mean_squared_error_loss(name="mse",
                                            input_feature=("output",
                                                           datatypes.Array(3)))

        builder.set_adam_optimizer(
            AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
        builder.set_epochs(20, allowed_set=[10, 20, 30])

        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)

        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertEqual(spec.description.trainingInput[0].name, "input")
        self.assertEqual(
            spec.description.trainingInput[0].type.WhichOneof("Type"),
            "multiArrayType")
        self.assertEqual(spec.description.trainingInput[1].name, "output_true")
        self.assertEqual(
            spec.description.trainingInput[1].type.WhichOneof("Type"),
            "multiArrayType")
Example #28
        def get_coreml_model_depthwise(X, params, w):
            eval = True
            mlmodel = None
            try:
                input_dim = X.shape[2:]
                input_features = [("data", datatypes.Array(*input_dim))]
                output_features = [("output", None)]
                builder = neural_network.NeuralNetworkBuilder(
                    input_features, output_features)
                # translate weights: (Kh, Kw, kernel_channels, output_channels) == (Kh, Kw, Cin/g, Cout) == (Kh, Kw, 1, channel_multiplier * Cin)
                w_e = np.reshape(
                    w,
                    (
                        params["kernel_size"],
                        params["kernel_size"],
                        params["multiplier"] * params["C"],
                        1,
                    ),
                )
                w_e = np.transpose(w_e, [0, 1, 3, 2])
                if params["padding"] == "SAME":
                    pad_mode = "same"
                else:
                    pad_mode = "valid"
                builder.add_convolution(
                    "conv",
                    kernel_channels=1,
                    output_channels=params["multiplier"] * params["C"],
                    height=params["kernel_size"],
                    width=params["kernel_size"],
                    stride_height=params["stride"],
                    stride_width=params["stride"],
                    border_mode=pad_mode,
                    groups=params["C"],
                    W=w_e,
                    b=None,
                    has_bias=0,
                    is_deconv=0,
                    output_shape=None,
                    input_name="data",
                    output_name="output",
                )

                if cpu_only:
                    compute_unit = ComputeUnit.CPU_ONLY
                else:
                    compute_unit = ComputeUnit.ALL
                mlmodel = MLModel(builder.spec, compute_units=compute_unit)
            except RuntimeError as e:
                print(e)
                eval = False
            return mlmodel, eval
Example #29
    def test_mlmodel_to_spec_to_mlmodel_with_no_weights_model(self):
        """
        convert mlmodel to spec, and then back to mlmodel and verify that it works
        """
        spec = self.mlmodel_no_weights.get_spec()
        # if no weights_dir is passed, error will be raised
        with pytest.raises(Exception, match="MLModel of type mlProgram cannot be loaded just from the model "
                                             "spec object. It also needs the path to the weights file. "
                                             "Please provide that as well, using the 'weights_dir' argument."):
            MLModel(spec)

        # weights_dir will still exist, even though the model has no weights,
        # with a weights file that only has header and no data
        weights_dir = self.mlmodel_no_weights.weights_dir
        assert weights_dir is not None
        mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir)
        self._test_mlmodel_correctness(mlmodel_from_spec)

        # load mlmodel from spec using an empty weights_dir
        with tempfile.TemporaryDirectory() as empty_weight_dir:
            mlmodel_from_spec = MLModel(spec, weights_dir=empty_weight_dir)
            self._test_mlmodel_correctness(mlmodel_from_spec)
Example #30
    def test_predict_api(self):
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)

        if utils._macos_version() >= (12, 0):
            for compute_units in coremltools.ComputeUnit:
                loaded_model = MLModel(package.name, compute_units=compute_units)

                preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0})
                assert preds is not None
                assert preds["output"] == 3.1
                assert loaded_model.compute_unit == compute_units
        else:
            # just check if we can load it
            loaded_model = MLModel(package.name)

        # cleanup
        _remove_path(package.name)