Example #1
    def save_model(self, path: str):
        """
        Serializes the model into path.

        Args:
            path (str): Target destination
        """
        inputs = [('input', datatypes.Array(*self.input))]
        outputs = [('output', datatypes.Array(*self.output))]
        net_builder = NeuralNetworkBuilder(inputs, outputs)
        input = 'input'
        prev_device = next(next(self.nn.children()).parameters()).device
        try:
            for name, layer in self.nn.to('cpu').named_children():
                input = layer.serialize(name, input, net_builder)
            mlmodel = MLModel(net_builder.spec)
            mlmodel.short_description = 'kraken recognition model'
            mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']'
            if self.codec:
                mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l)
            if self.user_metadata:
                mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata)
            mlmodel.save(path)
        finally:
            self.nn.to(prev_device)
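Since save_model() stores the VGSL spec, codec and user metadata under user_defined_metadata, the round trip can be checked by reloading the saved file with coremltools. A minimal sketch, assuming a model was written to the hypothetical path 'model.mlmodel' by the method above:

import json
from coremltools.models import MLModel

# Sketch: read back the metadata that save_model() wrote ('model.mlmodel' is a hypothetical path).
mlmodel = MLModel('model.mlmodel')
meta = mlmodel.user_defined_metadata
print(meta['vgsl'])                    # the VGSL spec string, '[ ... ]'
if 'codec' in meta:
    codec = json.loads(meta['codec'])  # label mapping serialized by save_model()
if 'kraken_meta' in meta:
    extra = json.loads(meta['kraken_meta'])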
Example #2
    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = 'Test author'
        self.assertEqual(model.author, 'Test author')
        self.assertEqual(model.get_spec().description.metadata.author, 'Test author')

        model.license = 'Test license'
        self.assertEqual(model.license, 'Test license')
        self.assertEqual(model.get_spec().description.metadata.license, 'Test license')

        model.short_description = 'Test model'
        self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.get_spec().description.metadata.shortDescription, 'Test model')

        model.input_description['feature_1'] = 'This is feature 1'
        self.assertEqual(model.input_description['feature_1'], 'This is feature 1')

        model.output_description['output'] = 'This is output'
        self.assertEqual(model.output_description['output'], 'This is output')

        filename = tempfile.mktemp(suffix='.mlmodel')
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEqual(model.author, 'Test author')
        self.assertEqual(model.license, 'Test license')
        # self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.input_description['feature_1'], 'This is feature 1')
        self.assertEqual(model.output_description['output'], 'This is output')
Example #3
    def test_rename_input(self):
        utils.rename_feature(self.spec,
                             "feature_1",
                             "renamed_feature",
                             rename_inputs=True)
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        if utils._macos_version() >= (12, 0):
            preds = loaded_model.predict({
                "renamed_feature": 1.0,
                "feature_2": 1.0
            })
            assert preds is not None
            assert preds["output"] == 3.1

        # reset the spec for next run
        utils.rename_feature(self.spec,
                             "renamed_feature",
                             "feature_1",
                             rename_inputs=True)

        # cleanup
        _remove_path(package.name)
Example #4
    def test_builder_with_validation(self):
        builder = KNearestNeighborsClassifierBuilder(
            input_name='input',
            output_name='output',
            number_of_dimensions=10,
            default_class_label='defaultLabel',
            k=3,
            weighting_scheme='inverse_distance',
            index_type='kd_tree',
            leaf_size=50)
        builder.author = 'CoreML Team'
        builder.license = 'MIT'
        builder.description = 'test_builder_with_validation'

        # Save the updated spec
        coreml_model = MLModel(builder.spec)
        coreml_model_path = '/tmp/__test_builder_with_validation.mlmodel'
        coreml_model.save(coreml_model_path)
        self.assertTrue(os.path.isfile(coreml_model_path))

        try:
            stdout, stderr, return_code = self._compile_mlmodel(
                coreml_model_path)
            self.assertEqual(return_code, 0)
        finally:
            self._delete_mlmodel_and_mlmodelc(coreml_model_path)
Example #5
    def test_predict_api(self):
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)

        if utils._macos_version() >= (12, 0):
            for compute_units in coremltools.ComputeUnit:
                if (compute_units == coremltools.ComputeUnit.CPU_AND_NE
                        and utils._macos_version() < (13, 0)):
                    continue

                loaded_model = MLModel(package.name,
                                       compute_units=compute_units)

                preds = loaded_model.predict({
                    "feature_1": 1.0,
                    "feature_2": 1.0
                })
                assert preds is not None
                assert preds["output"] == 3.1
                assert loaded_model.compute_unit == compute_units
        else:
            # just check if we can load it
            loaded_model = MLModel(package.name)

        # cleanup
        _remove_path(package.name)
Example #6
    def test_predict_api(self):
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)

        if utils._macos_version() >= (12, 0):
            for compute_units in coremltools.ComputeUnit:
                loaded_model = MLModel(package.name,
                                       compute_units=compute_units)

                preds = loaded_model.predict({
                    "feature_1": 1.0,
                    "feature_2": 1.0
                })
                self.assertIsNotNone(preds)
                self.assertEqual(preds["output"], 3.1)
                self.assertEqual(loaded_model.compute_unit, compute_units)
        else:
            # just check if we can load it
            loaded_model = MLModel(package.name)

        # cleanup
        MLModelTest._remove_path(package.name)
Example #7
    def test_can_init_and_save_model_from_builder_with_updated_spec(self):
        builder = KNearestNeighborsClassifierBuilder(
            input_name="input",
            output_name="output",
            number_of_dimensions=10,
            default_class_label="defaultLabel",
            k=3,
            weighting_scheme="inverse_distance",
            index_type="kd_tree",
            leaf_size=50,
        )
        builder.author = "CoreML Team"
        builder.license = "MIT"
        builder.description = "test_builder_with_validation"

        # Save the updated spec
        coreml_model = MLModel(builder.spec)
        self.assertIsNotNone(coreml_model)
        coreml_model_path = "/tmp/__test_builder_with_validation.mlmodel"

        try:
            coreml_model.save(coreml_model_path)
            self.assertTrue(os.path.isfile(coreml_model_path))
        finally:
            self._delete_mlmodel_and_mlmodelc(coreml_model_path)
Example #8
    def save_model(self, path: str):
        """
        Serializes the model into path.

        Args:
            path (str): Target destination
        """
        inputs = [('input', datatypes.Array(*self.input))]
        outputs = [('output', datatypes.Array(*self.output))]
        net_builder = NeuralNetworkBuilder(inputs, outputs)
        input = 'input'
        prev_device = next(next(self.nn.children()).parameters()).device
        try:
            for name, layer in self.nn.to('cpu').named_children():
                input = layer.serialize(name, input, net_builder)
            mlmodel = MLModel(net_builder.spec)
            mlmodel.short_description = 'kraken model'
            mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']'
            if self.codec:
                mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l)
            if self.user_metadata:
                mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata)
            mlmodel.save(path)
        finally:
            self.nn.to(prev_device)
Example #9
    def test_downgrade_specification_version(self):
        # manually set an invalid specification version
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        assert model.get_spec().specificationVersion == 1

        # manually set a high specification version
        self.spec.specificationVersion = 4
        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename, auto_set_specification_version=True)
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # simple neural network with only spec 1 layer
        input_features = [('data', datatypes.Array(3))]
        output_features = [('out', datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_activation('relu', 'RELU', 'data', 'out')
        # set a high specification version
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        filename = tempfile.mktemp(suffix='.mlmodel')
        model.save(filename)
        # load the model back
        model = MLModel(filename)
        assert model.get_spec().specificationVersion == 1

        # test save without automatic set specification version
        self.spec.specificationVersion = 3
        filename = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # the specification version should be original
        assert model.get_spec().specificationVersion == 3
Example #10
def _keras_2_mlmodel_image():
    """
    Converts a Keras h5 model into an ML Model for image data and saves it to disk.

    NOTE: Image configuration must be specified from Explora. 

    NOTE: Currently, only categorical cross entropy loss is supported.
    """
    model = get_keras_model()
    ios_config = state.state["ios_config"]
    class_labels = ios_config["class_labels"]
    mlmodel = keras_converter.convert(model, input_names=['image'],
                                output_names=['output'],
                                class_labels=class_labels,
                                predicted_feature_name='label')
    mlmodel.save(state.state["mlmodel_path"])

    image_config = ios_config["image_config"]
    spec = coremltools.utils.load_spec(state.state["mlmodel_path"])
    builder = coremltools.models.neural_network.NeuralNetworkBuilder(spec=spec)

    dims = image_config["dims"]
    spec.description.input[0].type.imageType.width = dims[0]
    spec.description.input[0].type.imageType.height = dims[1]

    cs = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(image_config["color_space"])
    spec.description.input[0].type.imageType.colorSpace = cs

    trainable_layer_names = [layer.name for layer in model.layers if layer.get_weights()]
    builder.make_updatable(trainable_layer_names)

    builder.set_categorical_cross_entropy_loss(name='loss', input='output')

    if isinstance(model.optimizer, SGD):
        params = SgdParams(
            lr=K.eval(model.optimizer.lr), 
            batch=state.state["hyperparams"]["batch_size"],
        )
        builder.set_sgd_optimizer(params)
    elif isinstance(model.optimizer, Adam):
        params = AdamParams(
            lr=K.eval(model.optimizer.lr),
            batch=state.state["hyperparams"]["batch_size"],
            beta1=K.eval(model.optimizer.beta_1),
            beta2=K.eval(model.optimizer.beta_2),
            eps=model.optimizer.epsilon,
        )
        builder.set_adam_optimizer(params)
    else:
        raise Exception("iOS optimizer must be SGD or Adam!")

    builder.set_epochs(UNLIMITED_EPOCHS)
    builder.set_shuffle(state.state["hyperparams"]["shuffle"])  

    mlmodel_updatable = MLModel(spec)
    mlmodel_updatable.save(state.state["mlmodel_path"])

    K.clear_session()
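One way to sanity-check the result of _keras_2_mlmodel_image() is to reload the saved spec and confirm that the network was actually marked updatable and that the image input received the configured dimensions. A short sketch, assuming the same state.state["mlmodel_path"] location used above:

import coremltools

# Sketch: verify that the saved model is updatable and that the image input was configured.
spec = coremltools.utils.load_spec(state.state["mlmodel_path"])
assert spec.isUpdatable                      # set as a side effect of builder.make_updatable(...)
image_type = spec.description.input[0].type.imageType
print(image_type.width, image_type.height)   # should match image_config["dims"]
print(len(spec.description.trainingInput))   # training inputs added for the loss target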
Example #11
    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author,
                         "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license,
                         "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            "Test model")

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString,
                         "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")
        self.assertEqual(model.output_description["output"], "This is output")

        # cleanup
        MLModelTest._remove_path(package.name)
Example #12
    def test_rename_input_bad(self):
        rename_feature(self.spec, "blah", "bad_name", rename_inputs=True)
        model = MLModel(self.spec)

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        if utils._macos_version() >= (12, 0):
            preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0})
            self.assertIsNotNone(preds)
            self.assertEqual(preds["output"], 3.1)

        # cleanup
        MLModelTest._remove_path(package.name)
Example #13
    def test_builder_with_compilation_default_parameters(self):
        builder = KNearestNeighborsClassifierBuilder(input_name='input',
                                                     output_name='output',
                                                     number_of_dimensions=4,
                                                     default_class_label='defaultLabel')

        # Save the updated spec
        coreml_model = MLModel(builder.spec)
        coreml_model_path = '/tmp/__test_builder_with_validation.mlmodel'
        coreml_model.save(coreml_model_path)
        self.assertTrue(os.path.isfile(coreml_model_path))

        try:
            stdout, stderr, return_code = self._compile_mlmodel(coreml_model_path)
            self.assertEqual(return_code, 0)
        finally:
            self._delete_mlmodel_and_mlmodelc(coreml_model_path)
Example #14
    def test_can_init_and_save_model_from_builder_default_parameters(self):
        builder = KNearestNeighborsClassifierBuilder(
            input_name='input',
            output_name='output',
            number_of_dimensions=4,
            default_class_label='defaultLabel')

        # Save the updated spec
        coreml_model = MLModel(builder.spec)
        self.assertIsNotNone(coreml_model)
        coreml_model_path = '/tmp/__test_builder_with_validation.mlmodel'

        try:
            coreml_model.save(coreml_model_path)
            self.assertTrue(os.path.isfile(coreml_model_path))
        finally:
            self._delete_mlmodel_and_mlmodelc(coreml_model_path)
Example #15
    def test_save(self):
        model = MLModel(self.spec)

        # Verify "save" can be called twice and the saved
        # model can be loaded successfully each time
        for _ in range(0, 2):
            package = tempfile.TemporaryDirectory(suffix=".mlpackage")
            package.cleanup()

            model.save(package.name)
            loaded_model = MLModel(package.name)

            if utils._macos_version() >= (12, 0):
                preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0})
                assert preds is not None
                assert preds["output"] == 3.1

            _remove_path(package.name)
Example #16
    def test_model_api(self):
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author,
                         "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license,
                         "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            "Test model")

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString,
                         "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        loaded_model = MLModel(filename)

        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.license, "Test license")
        # self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")
        self.assertEqual(model.output_description["output"], "This is output")
Example #17
    def test_model_api(self):
        model = MLModel(self.spec)
        assert model is not None

        model.author = "Test author"
        assert model.author == "Test author"
        assert model.get_spec().description.metadata.author == "Test author"

        model.license = "Test license"
        assert model.license == "Test license"
        assert model.get_spec().description.metadata.license == "Test license"

        model.short_description = "Test model"
        assert model.short_description == "Test model"
        assert model.get_spec(
        ).description.metadata.shortDescription == "Test model"

        model.version = "1.3"
        assert model.version == "1.3"
        assert model.get_spec().description.metadata.versionString == "1.3"

        model.input_description["feature_1"] = "This is feature 1"
        assert model.input_description["feature_1"] == "This is feature 1"

        model.output_description["output"] = "This is output"
        assert model.output_description["output"] == "This is output"

        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        assert model.author == "Test author"
        assert model.license == "Test license"
        assert model.short_description == "Test model"
        assert model.input_description["feature_1"] == "This is feature 1"
        assert model.output_description["output"] == "This is output"

        # cleanup
        _remove_path(package.name)
Example #18
    def save_model(self, path: str):
        """
        Serializes the model into path.

        Args:
            path (str): Target destination
        """
        inputs = [('input', datatypes.Array(*self.input))]
        outputs = [('output', datatypes.Array(*self.output))]
        net_builder = NeuralNetworkBuilder(inputs, outputs)
        input = 'input'
        prev_device = next(self.nn.parameters()).device
        try:
            self.nn.to('cpu')

            def _serialize_layer(net, input, net_builder):
                for name, l in net.named_children():
                    logger.debug(
                        f'Serializing layer {name} with type {type(l)}')
                    if type(l) in (layers.MultiParamParallel,
                                   layers.MultiParamSequential):
                        _serialize_layer(l, input, net_builder)
                    else:
                        l.serialize(name, input, net_builder)

            _serialize_layer(self.nn, input, net_builder)
            mlmodel = MLModel(net_builder.spec)
            mlmodel.short_description = 'kraken model'
            mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(
                self.named_spec) + ']'
            if self.codec:
                mlmodel.user_defined_metadata['codec'] = json.dumps(
                    self.codec.c2l)
            if self.user_metadata:
                mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(
                    self.user_metadata)
            mlmodel.save(path)
        finally:
            self.nn.to(prev_device)
Example #19
    def test_save_in_place(self):
        model = MLModel(self.spec)

        # Verify "save" can be called twice and the saved
        # model can be loaded successfully each time
        # the mlpackage remains in place after the first save
        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()
        for _ in range(2):

            model.save(package.name)
            loaded_model = MLModel(package.name)

            if utils._macos_version() >= (12, 0):
                preds = loaded_model.predict({
                    "feature_1": 1.0,
                    "feature_2": 1.0
                })
                self.assertIsNotNone(preds)
                self.assertEqual(preds["output"], 3.1)

        MLModelTest._remove_path(package.name)
Example #20
def make_updatable(builder, mlmodel_url, mlmodel_updatable_path):
    """This method makes an existing non-updatable mlmodel updatable.
    mlmodel_url - the path where the Core ML model is stored.
    mlmodel_updatable_path - the path where the updatable Core ML model will be saved.
    """
    import coremltools
    model_spec = builder.spec

    # make_updatable method is used to make a layer updatable. It requires a list of layer names.
    # dense_1 and dense_2 are two innerProduct layers in this example and we make them updatable.
    builder.make_updatable(['dense_1', 'dense_2'])

    # Categorical Cross Entropy or Mean Squared Error can be chosen for the loss layer.
    # Categorical Cross Entropy is used in this example. CCE requires two inputs: 'name' and 'input'.
    # name must be a string and will be the name associated with the loss layer
    # input must be the output of a softmax layer in the case of CCE.
    # The loss's target will be provided automatically as a part of the model's training inputs.
    builder.set_categorical_cross_entropy_loss(name='lossLayer',
                                               input='digitProbabilities')

    # In addition to the loss layer, an optimizer must also be defined. SGD and Adam optimizers are supported.
    # SGD is used for this example. To use SGD, one must set lr (learningRate) and batch (miniBatchSize); momentum is an optional parameter.
    from coremltools.models.neural_network import SgdParams
    builder.set_sgd_optimizer(SgdParams(lr=0.01, batch=32))

    # Finally, the number of epochs must be set as follows.
    builder.set_epochs(10)

    # Set training inputs descriptions
    model_spec.description.trainingInput[
        0].shortDescription = 'Example image of handwritten digit'
    model_spec.description.trainingInput[
        1].shortDescription = 'Associated true label (digit) of example image'

    # save the updated spec
    from coremltools.models import MLModel
    mlmodel_updatable = MLModel(model_spec)
    mlmodel_updatable.save(mlmodel_updatable_path)
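For context, a call like this is typically driven from a spec loaded from disk. A minimal usage sketch, where the paths and the layer names dense_1/dense_2 and digitProbabilities are assumptions that must match the actual model being edited (they mirror the comments above):

import coremltools
from coremltools.models.neural_network import NeuralNetworkBuilder

# Hypothetical paths; the model must contain the layers referenced in make_updatable().
mlmodel_url = './mnist.mlmodel'
mlmodel_updatable_path = './mnist_updatable.mlmodel'

spec = coremltools.utils.load_spec(mlmodel_url)
builder = NeuralNetworkBuilder(spec=spec)
make_updatable(builder, mlmodel_url, mlmodel_updatable_path)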
Example #21
    def test_downgrade_specification_version(self):
        # manually set an invalid specification version
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # manually set a high specification version
        self.spec.specificationVersion = 4
        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename, auto_set_specification_version=True)
        model = MLModel(filename)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # simple neural network with only spec 1 layer
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)
        builder.add_activation("relu", "RELU", "data", "out")
        # set a high specification version
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        # load the model back
        model = MLModel(filename)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # test save without automatic set specification version
        self.spec.specificationVersion = 3
        filename = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, filename, auto_set_specification_version=False)
        model = MLModel(filename)
        # the specification version should be original
        if model.get_spec().specificationVersion != 3:
            raise AssertionError
Example #22
def convert_keras_to_mlmodel(keras_model_path, coreml_model_path):

    import importlib.machinery as imm
    from coremltools.converters.keras._keras_converter import convertToSpec
    from coremltools.models import MLModel, _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
    #    from coremltools.models.utils import convert_double_to_float_multiarray_type

    from keras.models import load_model
    from kerassurgeon.operations import delete_layer

    sys.path.append(os.path.dirname(sys.argv[4]))

    # Import neural network code
    NN_file_name = os.path.splitext(os.path.basename(sys.argv[3]))[0]
    NN = imm.SourceFileLoader(NN_file_name, sys.argv[3]).load_module()

    try:
        NN_model_name = NN.Model_Name()
    except:
        NN_model_name = NN_file_name

    try:
        NN_model_description = NN.Model_Description()
    except:
        NN_model_description = None

    # Load custom layers if implemented in each Keras model.
    # Note that the imported NN may not define Custom_Layers(), so the call is wrapped in try/except.
    # The type is a dictionary; the keys are expected to match the corresponding values (the layer defs).
    try:
        NN_custom_layers = NN.Custom_Layers()
    except:
        NN_custom_layers = {}

    # Import Train.py to get custom loss and metrics
    Train_name = os.path.splitext(os.path.basename(sys.argv[4]))[0]
    Train_py = imm.SourceFileLoader(Train_name, sys.argv[4]).load_module()

    custom_loss = Train_py.get_loss()
    custom_metrics = Train_py.get_metrics()

    kpt, kex = os.path.splitext(keras_model_path)
    keras_model_path_temp = kpt + '_temp' + kex

    print('----------------------------------------------------------')
    print('NN model file path: {}'.format(sys.argv[3]))
    print('NN model name: {}'.format(NN_model_name))
    print('NN model description: {}'.format(NN_model_description))
    print('NN custom layers:')
    print(NN_custom_layers)
    print('Training file path and loss/metrics used:')
    print(sys.argv[4])
    print(custom_loss)
    print(custom_metrics)

    print('----------------------------------------------------------')
    print('Keras model file: {}'.format(keras_model_path))
    print('Keras model file temp: {}'.format(keras_model_path_temp))
    print('CoreML model file: {}'.format(coreml_model_path))

    print('----------------------------------------------------------')
    print('Keras custom layers implemented in AIAS for this code:')
    for k in conversion_func_in_AIAS:
        print(k)

    # Delete Dropout layers from the Keras model before conversion,
    # because these layers can cause conversion failures in coremltools.
    keras_model = load_model(keras_model_path,
                             custom_objects=dict(**custom_loss,
                                                 **custom_metrics,
                                                 **NN_custom_layers),
                             compile=False)

    print('----------------------------------------------------------')
    keras_model.summary()

    del_prefixs = [
        'gaussian_dropout', 'gaussian_noise', 'dropout', 'spatial_dropout2d'
    ]  # add prefixes here to define the layers to be deleted

    for del_prefix in del_prefixs:
        idp = 1
        while True:
            try:
                layer = keras_model.get_layer('{}_{}'.format(del_prefix, idp))
            except:
                break
            print('Deleting layer: {}_{}'.format(del_prefix, idp))
            keras_model = delete_layer(model=keras_model,
                                       layer=layer,
                                       copy=False)
            idp += 1

    keras_model.summary()
    print('Saving temporary Keras model: {}'.format(keras_model_path_temp))
    keras_model.save(keras_model_path_temp)

    # Construct custom layers and conversion functions
    custom_layers = {}
    custom_conversion_func = {}
    print('----------------------------------------------------------')

    if NN_custom_layers is not None:
        print('Custom layers in this Keras model:')
        for keras_layer_key in NN_custom_layers:
            if keras_layer_key in conversion_func_in_AIAS:
                print(keras_layer_key + ' - available')
                custom_layers[keras_layer_key] = NN_custom_layers[
                    keras_layer_key]
                custom_conversion_func[
                    keras_layer_key] = conversion_func_in_AIAS[keras_layer_key]
            else:
                print(keras_layer_key + ' - unavailable')

        print('Matched layers and conversion functions for coremltools:')
        print(custom_layers)
        print(custom_conversion_func)

    else:
        print('Custom layers not found in this Keras model.')

    custom_objects = dict(**custom_loss, **custom_metrics, **custom_layers)

    print('----------------------------------------------------------')
    print('Custom objects passed into coremltools converter:')
    print(custom_objects)
    print('----------------------------------------------------------')

    # Convert
    # Do not change the input_names/output_names because they are used to identify input/output layers in Keras code
    spec = convertToSpec(keras_model_path_temp,
                         input_names='input',
                         output_names='output',
                         add_custom_layers=True,
                         custom_conversion_functions=custom_conversion_func,
                         custom_objects=custom_objects,
                         respect_trainable=False)  # should be True???
    model = MLModel(spec)

    # Set descriptions
    model.author = 'Takashi Shirakawa'
    model.license = '(C) 2019-2020, Takashi Shirakawa. All right reserved.'
    model.short_description = NN_model_name + ' for A.I.Segmentation'
    model.input_description[
        'input'] = 'Input is a square image with 8-bit grayscale per pixel.'
    model.output_description[
        'output'] = 'Output (segmentation) is supposed to be an image with the same dimension and format.'

    # Save mlmodel
    model.save(coreml_model_path)

    #    spec_f = model.get_spec()
    #    convert_double_to_float_multiarray_type(spec_f)
    #    model_f = MLModel(spec_f)
    #    model_f.save(os.path.splitext(coreml_model_path)[0] + ', float_multiarray.mlmodel')

    # Show results
    spec = model.get_spec()
    print('----------------------------------------------------------')
    print('Model descriptions:')
    print(spec.description)
    #    print('Model descriptions (float multiarray type):')
    #    print(spec_f.description)

    print('Custom layers:')
    for i, layer in enumerate(spec.neuralNetwork.layers):
        if layer.HasField('custom'):
            print('Layer %d = %s : class name = %s' %
                  (i + 1, layer.name, layer.custom.className))
#        else:
#            print('Layer %d = %s' % (i, layer.name))

    print('Done.')
import onnx
import coremltools
from onnx_coreml import convert
from coremltools.models import MLModel
# test
onnx_model = onnx.load('models/SegNet_portrait_epoch-0099_sim.onnx')
onnx.checker.check_model(onnx_model)
coreml_model = convert(onnx_model,
                       image_input_names={'input.1'},
                       preprocessing_args={"image_scale": 1 / 255.})
spec = coreml_model.get_spec()
coremltools.utils.rename_feature(spec, 'input.1', 'input_1')
spec.neuralNetwork.preprocessing[0].featureName = 'input_1'
coreml_model = MLModel(spec)
coreml_model.save('SegNet.mlmodel')
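To confirm the rename actually took effect in the saved file, the model can be loaded back and its input names inspected. A small sketch assuming the SegNet.mlmodel written above:

from coremltools.models import MLModel

# Sketch: check which input name the saved model now exposes.
saved = MLModel('SegNet.mlmodel')
spec = saved.get_spec()
print([inp.name for inp in spec.description.input])     # expect ['input_1']
print(spec.neuralNetwork.preprocessing[0].featureName)  # expect 'input_1'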
def train_model(ENV, in_file, op_file):

    graph = tf.Graph()
    with graph.as_default():
        stacked_layers = {}

        # e.g: log filter bank or MFCC features
        # Has size [batch_size, max_stepsize, num_features], but the
        # batch_size and max_stepsize can vary along each step
        inputs = tf.placeholder(tf.float32, [None, None, num_features])

        targets = tf.sparse_placeholder(tf.int32)
        # 1d array of size [batch_size]
        seq_len = tf.placeholder(tf.int32, [None])

        # Weights & biases
        weight_classes = tf.Variable(
            tf.truncated_normal([num_hidden, num_classes],
                                mean=0,
                                stddev=0.1,
                                dtype=tf.float32))
        bias_classes = tf.Variable(tf.zeros([num_classes]), dtype=tf.float32)

        # _activation = tf.nn.relu  # this was causing the model to diverge
        _activation = None

        layers = {'forward': [], 'backward': []}
        for key in layers.keys():
            for i in range(num_layers):
                cell = tf.nn.rnn_cell.LSTMCell(num_hidden,
                                               use_peepholes=True,
                                               activation=_activation,
                                               state_is_tuple=True,
                                               cell_clip=clip_thresh)
                #
                #cell = RWACell(num_units=num_hidden)
                layers[key].append(cell)
            stacked_layers[key] = tf.nn.rnn_cell.MultiRNNCell(
                layers[key], state_is_tuple=True)

        outputs, bilstm_vars = tf.nn.bidirectional_dynamic_rnn(
            stacked_layers['forward'],
            stacked_layers['backward'],
            inputs,
            sequence_length=seq_len,
            time_major=False,  # [batch_size, max_time, num_hidden]
            dtype=tf.float32)
        """
        outputs_concate = tf.concat_v2(outputs, 2)
        outputs_concate = tf.reshape(outputs_concate, [-1, 2*num_hidden])
        # logits = tf.matmul(outputs_concate, weight_classes) + bias_classes
        """
        fw_output = tf.reshape(outputs[0], [-1, num_hidden])
        bw_output = tf.reshape(outputs[1], [-1, num_hidden])
        logits = tf.add(
            tf.add(tf.matmul(fw_output, weight_classes),
                   tf.matmul(bw_output, weight_classes)), bias_classes)

        logits = tf.reshape(logits, [batch_size, -1, num_classes])
        loss = tf.nn.ctc_loss(targets, logits, seq_len, time_major=False)
        error = tf.reduce_mean(loss)
        optimizer = tf.train.MomentumOptimizer(learning_rate,
                                               momentum).minimize(error)

        # Evaluating
        # decoded, log_prob = ctc_ops.ctc_greedy_decoder(tf.transpose(logits, perm=[1, 0, 2]), seq_len)
        decoded, log_prob = tf.nn.ctc_beam_search_decoder(
            tf.transpose(logits, perm=[1, 0, 2]), seq_len)
        label_error_rate = tf.reduce_mean(
            tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)

    data, labels = load_ipad_data(in_file)
    bound = ((3 * len(data) // batch_size) // 4) * batch_size
    train_inputs = data[0:bound]
    train_labels = labels[0:bound]
    test_data = data[bound:]
    test_labels = labels[bound:]
    num_examples = len(train_inputs)
    num_batches_per_epoch = num_examples // batch_size

    with tf.Session(graph=graph,
                    config=tf.ConfigProto(gpu_options=gpu_options)) as session:
        # Initializate the weights and biases
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)

        ckpt = tf.train.get_checkpoint_state(op_file)
        if ckpt:
            logging.info('load', ckpt.model_checkpoint_path)
            saver.restore(session, ckpt.model_checkpoint_path)
        else:
            logging.info("no previous session to load")

        for curr_epoch in range(num_epochs):
            train_cost = train_ler = 0
            start = time.time()

            for batch in range(num_batches_per_epoch):
                # Getting the index
                indices = [
                    i % num_examples
                    for i in range(batch * batch_size, (batch + 1) *
                                   batch_size)
                ]

                batch_train_inputs = train_inputs[indices]
                # Padding input to max_time_step of this batch
                batch_train_inputs, batch_train_seq_len = pad_sequences(
                    batch_train_inputs)

                # Converting to sparse representation so as to to feed SparseTensor input
                batch_train_targets = sparse_tuple_from(train_labels[indices])

                feed = {
                    inputs: batch_train_inputs,
                    targets: batch_train_targets,
                    seq_len: batch_train_seq_len
                }
                batch_cost, _ = session.run([error, optimizer], feed)
                train_cost += batch_cost * batch_size
                train_ler += session.run(label_error_rate,
                                         feed_dict=feed) * batch_size
                log = "Epoch {}/{}, iter {}, batch_cost {}"
                logging.info(
                    log.format(curr_epoch + 1, num_epochs, batch, batch_cost))

            saver.save(session,
                       os.path.join(ENV.output, 'best.ckpt'),
                       global_step=curr_epoch)

            # Shuffle the data
            shuffled_indexes = np.random.permutation(num_examples)
            train_inputs = train_inputs[shuffled_indexes]
            train_labels = train_labels[shuffled_indexes]

            # Metrics mean
            train_cost /= num_examples
            train_ler /= num_examples

            log = "Epoch {}/{}, train_cost = {:.3f}, train_ler = {:.3f}, time = {:.3f}"
            logging.info(
                log.format(curr_epoch + 1, num_epochs, train_cost, train_ler,
                           time.time() - start))

            #run the test data through
            indices = [
                i % len(test_data)
                for i in range(batch * batch_size, (batch + 1) * batch_size)
            ]
            test_inputs = test_data[indices]
            test_inputs, test_seq_len = pad_sequences(test_inputs)
            test_targets = sparse_tuple_from(test_labels[indices])
            feed_test = {
                inputs: test_inputs,
                targets: test_targets,
                seq_len: test_seq_len
            }
            test_cost, test_ler = session.run([error, label_error_rate],
                                              feed_dict=feed_test)
            log = "Epoch {}/{}, test_cost {}, test_ler {}"
            logging.info(
                log.format(curr_epoch + 1, num_epochs, test_cost, test_ler))

        input_features = [('strokeData', datatypes.Array(num_features))]
        output_features = [('labels', datatypes.Array(num_classes))]

        vars = tf.trainable_variables()
        weights = {'forward': {}, 'backward': {}}
        for _var in vars:
            name = _var.name
            if name.startswith('bidirectional_rnn/fw'):
                key = name.replace('bidirectional_rnn/fw/', '')
                key = key.replace('multi_rnn_cell/cell_0/lstm_cell/', '')
                key = key.replace(':0', '')
                weights['forward'][key] = _var.eval()
            else:
                key = name.replace('bidirectional_rnn/bw/', '')
                key = key.replace('multi_rnn_cell/cell_0/lstm_cell/', '')
                key = key.replace(':0', '')
                weights['backward'][key] = _var.eval()

    builder = NeuralNetworkBuilder(input_features, output_features, mode=None)

    fw_biases = [
        weights['forward']['bias'][0 * num_hidden:1 * num_hidden],
        weights['forward']['bias'][1 * num_hidden:2 * num_hidden],
        weights['forward']['bias'][2 * num_hidden:3 * num_hidden],
        weights['forward']['bias'][3 * num_hidden:4 * num_hidden]
    ]

    bw_biases = [
        weights['backward']['bias'][0 * num_hidden:1 * num_hidden],
        weights['backward']['bias'][1 * num_hidden:2 * num_hidden],
        weights['backward']['bias'][2 * num_hidden:3 * num_hidden],
        weights['backward']['bias'][3 * num_hidden:4 * num_hidden]
    ]

    num_LSTM_gates = 5

    input_weights = {
        'forward': np.zeros((num_LSTM_gates - 1, num_hidden, num_features)),
        'backward': np.zeros((num_LSTM_gates - 1, num_hidden, num_features))
    }

    recurrent_weights = {
        'forward': np.zeros((num_LSTM_gates - 1, num_hidden, num_hidden)),
        'backward': np.zeros((num_LSTM_gates - 1, num_hidden, num_hidden))
    }

    builder.add_bidirlstm(
        name='bidirectional_1',
        W_h=recurrent_weights['forward'],
        W_x=input_weights['forward'],
        b=fw_biases,
        W_h_back=recurrent_weights['backward'],
        W_x_back=input_weights['backward'],
        b_back=bw_biases,
        hidden_size=num_hidden,
        input_size=num_features,
        input_names=[
            'strokeData', 'bidirectional_1_h_in', 'bidirectional_1_c_in',
            'bidirectional_1_h_in_rev', 'bidirectional_1_c_in_rev'
        ],
        output_names=[
            'y', 'bidirectional_1_h_out', 'bidirectional_1_c_out',
            'bidirectional_1_h_out_rev', 'bidirectional_1_c_out_rev'
        ],
        peep=[
            weights['forward']['w_i_diag'], weights['forward']['w_f_diag'],
            weights['forward']['w_o_diag']
        ],
        peep_back=[
            weights['backward']['w_i_diag'], weights['backward']['w_f_diag'],
            weights['backward']['w_o_diag']
        ],
        cell_clip_threshold=clip_thresh)

    builder.add_softmax(name='softmax', input_name='y', output_name='labels')

    optional_inputs = [('bidirectional_1_h_in', num_hidden),
                       ('bidirectional_1_c_in', num_hidden),
                       ('bidirectional_1_h_in_rev', num_hidden),
                       ('bidirectional_1_c_in_rev', num_hidden)]
    optional_outputs = [('bidirectional_1_h_out', num_hidden),
                        ('bidirectional_1_c_out', num_hidden),
                        ('bidirectional_1_h_out_rev', num_hidden),
                        ('bidirectional_1_c_out_rev', num_hidden)]

    # Not really sure what the line below does; it was copied from the Keras converter in coremltools
    # and it seemed to make things work.
    builder.add_optionals(optional_inputs, optional_outputs)

    model = MLModel(builder.spec)

    model.short_description = 'Model for recognizing symbols and diagrams drawn on an iPad screen with Apple Pencil'

    model.input_description[
        'strokeData'] = 'A collection of strokes to classify'
    model.output_description[
        'labels'] = 'The "probability" of each label, in a dense array'

    outfile = 'bilstm.mlmodel'
    model.save(outfile)

    print('Saved to file: %s' % outfile)
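A quick way to exercise the saved model (on macOS, where Core ML prediction is available) is to load it back and push a single feature vector through it. A sketch that reuses num_features from the training code above; the zero vector is only a placeholder input:

import numpy as np
from coremltools.models import MLModel

# Sketch: classify one dummy stroke-feature vector with the saved model.
mlmodel = MLModel('bilstm.mlmodel')
probs = mlmodel.predict({'strokeData': np.zeros(num_features)})['labels']
print(int(np.argmax(probs)))  # index of the most likely label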
import sys
import tfcoreml as tf_converter
from coremltools.models import MLModel

if __name__ == '__main__':
    if len(sys.argv) < 4:
        print('Usage: python {} source size target_path'.format(sys.argv[0]))
        sys.exit()
    size = int(sys.argv[2])
    tf_converter.convert(
        tf_model_path=sys.argv[1],
        mlmodel_path=sys.argv[3],
        output_feature_names=['fc1:0', 'fc3:0'],
        input_name_shape_dict={'x:0' : [1, size, size, 18]}
    )
    model = MLModel(sys.argv[3])
    model.user_defined_metadata["version"] = "1"
    model.user_defined_metadata["size"] = "19"
    model.user_defined_metadata["komi"] = "7.5"
    model.user_defined_metadata["c_init"] = "0.8"
    model.user_defined_metadata["softmax_temperature"] = "1.0"
    model.user_defined_metadata["virtual_loss"] = "3"
    model.save(sys.argv[3])