示例#1
0
    def test_convert_nn_spec_to_half_precision(self):
        # Case 1: a network containing a weight-carrying layer (inner
        # product), which the fp16 conversion actually has to quantize.
        in_feats = [('data', datatypes.Array(3))]
        out_feats = [('out', datatypes.Array(3))]
        nn_builder = NeuralNetworkBuilder(in_feats, out_feats)
        ip_weights = np.random.uniform(-0.5, 0.5, (3, 3))
        nn_builder.add_inner_product(name='inner_product',
                                     W=ip_weights,
                                     b=None,
                                     input_channels=3,
                                     output_channels=3,
                                     has_bias=False,
                                     input_name='data',
                                     output_name='out')
        half_spec = convert_neural_network_spec_weights_to_fp16(
            MLModel(nn_builder.spec).get_spec())
        self.assertIsNotNone(half_spec)

        # Case 2: a network with no weight-carrying layers (LRN only); the
        # conversion has nothing to quantize but must still succeed.
        in_feats = [('data', datatypes.Array(3))]
        out_feats = [('out', datatypes.Array(3))]
        nn_builder = NeuralNetworkBuilder(in_feats, out_feats)
        nn_builder.add_lrn(name='lrn',
                           input_name='data',
                           output_name='out',
                           alpha=2,
                           beta=3,
                           local_size=1,
                           k=8)
        half_spec = convert_neural_network_spec_weights_to_fp16(
            MLModel(nn_builder.spec).get_spec())
        self.assertIsNotNone(half_spec)
示例#2
0
    def test_model_api(self):
        """Exercise the MLModel metadata API and verify the values survive a
        save/load round trip.

        Fixes: ``assertEquals`` is a deprecated alias (removed in Python
        3.12) — replaced with ``assertEqual``; the post-load assertions now
        check ``loaded_model`` (the original re-checked ``model``, leaving
        ``loaded_model`` entirely untested).
        """
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = 'Test author'
        self.assertEqual(model.author, 'Test author')
        self.assertEqual(model.get_spec().description.metadata.author, 'Test author')

        model.license = 'Test license'
        self.assertEqual(model.license, 'Test license')
        self.assertEqual(model.get_spec().description.metadata.license, 'Test license')

        model.short_description = 'Test model'
        self.assertEqual(model.short_description, 'Test model')
        self.assertEqual(model.get_spec().description.metadata.shortDescription, 'Test model')

        model.input_description['feature_1'] = 'This is feature 1'
        self.assertEqual(model.input_description['feature_1'], 'This is feature 1')

        model.output_description['output'] = 'This is output'
        self.assertEqual(model.output_description['output'], 'This is output')

        filename = tempfile.mktemp(suffix = '.mlmodel')
        model.save(filename)
        loaded_model = MLModel(filename)

        # Verify the metadata on the model loaded back from disk.
        self.assertEqual(loaded_model.author, 'Test author')
        self.assertEqual(loaded_model.license, 'Test license')
        # self.assertEqual(loaded_model.short_description, 'Test model')
        self.assertEqual(loaded_model.input_description['feature_1'], 'This is feature 1')
        self.assertEqual(loaded_model.output_description['output'], 'This is output')
示例#3
0
    def test_downgrade_specification_version(self):
        # An invalid (negative) specification version is normalized to 1.
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        assert model.get_spec().specificationVersion == 1

        # An unnecessarily high version is downgraded on save when
        # auto_set_specification_version is enabled.
        self.spec.specificationVersion = 4
        path = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, path, auto_set_specification_version=True)
        model = MLModel(path)
        assert model.get_spec().specificationVersion == 1

        # A network built from spec-1 layers only is downgraded to 1 even
        # when the builder claims version 3.
        feats_in = [('data', datatypes.Array(3))]
        feats_out = [('out', datatypes.Array(3))]
        builder = NeuralNetworkBuilder(feats_in, feats_out)
        builder.add_activation('relu', 'RELU', 'data', 'out')
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        path = tempfile.mktemp(suffix='.mlmodel')
        model.save(path)
        model = MLModel(path)
        assert model.get_spec().specificationVersion == 1

        # With the automatic downgrade disabled, the version is preserved.
        self.spec.specificationVersion = 3
        path = tempfile.mktemp(suffix='.mlmodel')
        save_spec(self.spec, path, auto_set_specification_version=False)
        model = MLModel(path)
        assert model.get_spec().specificationVersion == 3
示例#4
0
def _convert_to_image_input(proto, inputs):
    """Convert every ImageType entry of ``inputs`` into a true image input
    on the model built from ``proto``.

    Non-image input types are left untouched.

    Parameters
    ----------
    proto:
        Model spec to wrap and modify.
    inputs: list
        Input type descriptions; only ImageType instances are processed.

    Returns
    -------
    The updated model spec.

    Raises
    ------
    ValueError
        If an ImageType carries an unrecognized color layout.
    """
    tmp_model = MLModel(proto)
    for input_type in inputs:
        if isinstance(input_type, ImageType):
            if input_type.color_layout == "G":
                gray_bias = input_type.bias
                red_bias, green_bias, blue_bias = 0.0, 0.0, 0.0
            elif input_type.color_layout == "RGB":
                gray_bias = 0.0
                red_bias, green_bias, blue_bias = input_type.bias
            elif input_type.color_layout == "BGR":
                gray_bias = 0.0
                blue_bias, green_bias, red_bias = input_type.bias
            else:
                # Previously an unknown layout fell through and raised a
                # confusing NameError on the unbound bias variables.
                raise ValueError(
                    "Unsupported color layout: {}".format(input_type.color_layout)
                )
            tmp_model = neural_network.utils.make_image_input(
                tmp_model,
                input_type.name,
                is_bgr=input_type.color_layout == "BGR",
                image_format="NCHW" if input_type.channel_first else "NHWC",
                red_bias=red_bias,
                green_bias=green_bias,
                blue_bias=blue_bias,
                gray_bias=gray_bias,
                scale=input_type.scale,
            )
    return tmp_model.get_spec()
示例#5
0
    def inference(cls, architecture, model_path, image_path):
        """Run a Core ML model on a preprocessed image and return the
        squeezed probability array, or None for an unknown architecture.

        Fixes: calling ``.values()`` unconditionally broke when the model
        output was already an array rather than a classifier's label->prob
        dict (the sibling implementation guards with ``isinstance``); also
        drops the unused ``infer_shapes`` import and ``input_data`` local.
        """
        import numpy as np

        if cls.sanity_check(architecture):
            func = TestKit.preprocess_func['coreml'][architecture]
            img = func(image_path)

            # load model
            model = MLModel(model_path)
            spec = model.get_spec()

            # TODO: Multiple inputs
            input_name = spec.description.input[0].name

            # TODO: Multiple outputs
            output_name = spec.description.output[0].name

            # inference
            coreml_output = model.predict({input_name: img})

            prob = coreml_output[output_name]
            # Classifier outputs are dicts keyed by label; plain network
            # outputs are already array-like.
            if isinstance(prob, dict):
                prob = prob.values()
            return np.array(prob).squeeze()

        return None
示例#6
0
    def test_nn_classifier_util_file(self):
        """Build a one-layer linear network, promote it to a classifier with
        class labels read from a text file, and check the prediction."""
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)

        # Write one label per line; make_nn_classifier accepts a file path
        # as well as a list, which is what this test exercises.
        class_labels = ["a", "b", "c"]
        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f:
            f.write("\n".join(class_labels))
            f.flush()  # ensure contents hit disk before the path is read
            mlmodel = make_nn_classifier(
                mlmodel,
                class_labels=f.name,
                predicted_feature_name="out_confidence",
                predicted_probabilities_output="out",
            )
        # The largest activation (6.0 at index 2) should map to label "c".
        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
        self.assertEqual(out_dict["out_confidence"], "c")
        self.assertEqual(
            mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier"
        )
示例#7
0
def _convert_to_image_input(proto, inputs, skip_model_load=False):
    """Convert every ImageType entry of ``inputs`` into a true image input
    on the model built from ``proto``.

    Parameters
    ----------
    proto:
        Model spec to wrap and modify.
    inputs: list
        Input type descriptions; only ImageType instances are processed.
    skip_model_load: bool
        Forwarded to MLModel; skips compiling/loading the model.

    Returns
    -------
    The updated model spec.

    Raises
    ------
    ValueError
        If an ImageType carries an unrecognized color layout.
    """
    tmp_model = MLModel(proto, skip_model_load=skip_model_load)
    for input_type in inputs:
        if isinstance(input_type, ImageType):
            if input_type.color_layout in (ColorLayout.GRAYSCALE,
                                           ColorLayout.GRAYSCALE_FLOAT16):
                gray_bias = input_type.bias
                red_bias, green_bias, blue_bias = 0.0, 0.0, 0.0
            elif input_type.color_layout == ColorLayout.RGB:
                gray_bias = 0.0
                red_bias, green_bias, blue_bias = input_type.bias
            elif input_type.color_layout == ColorLayout.BGR:
                gray_bias = 0.0
                blue_bias, green_bias, red_bias = input_type.bias
            else:
                # Previously an unknown layout fell through and raised a
                # confusing NameError on the unbound bias variables.
                raise ValueError(
                    "Unsupported color layout: {}".format(input_type.color_layout)
                )
            tmp_model = neural_network.utils.make_image_input(
                tmp_model,
                input_type.name,
                is_bgr=input_type.color_layout == ColorLayout.BGR,
                image_format="NCHW" if input_type.channel_first else "NHWC",
                red_bias=red_bias,
                green_bias=green_bias,
                blue_bias=blue_bias,
                gray_bias=gray_bias,
                scale=input_type.scale,
            )
    return tmp_model.get_spec()
示例#8
0
    def _load_model(self, model_network_path):
        """Load a Coreml model from disk

        Parameters
        ----------

        model_network_path: str
            Path where the model network path is (mlmodel file)

        Returns
        -------
        model: A coreml model
        """

        from coremltools.models import MLModel

        if os.path.isfile(model_network_path):
            # load the model network
            loaded_model_ml = MLModel(model_network_path)
            # convert to Model_pb2.Model
            loaded_model_pb = loaded_model_ml.get_spec()
            self.weight_loaded = True
            print("Network file [{}] is loaded successfully.".format(
                model_network_path))
        else:
            print("Warning: Weights File [{}] is not found.".format(
                model_network_path))

        return loaded_model_pb
示例#9
0
    def load_model(cls, path: str):
        """
        Deserializes a VGSL model from a CoreML file.

        Args:
            path (str): CoreML file

        Returns:
            A TorchVGSLModel instance.

        Raises:
            KrakenInvalidModelException if the model data is invalid (not a
            string, protobuf file, or without appropriate metadata).
            FileNotFoundError if the path doesn't point to a file.
        """
        # MLModel raises TypeError for invalid input and DecodeError for a
        # file that is not a valid protobuf; both are wrapped in the
        # package's own exception type.
        try:
            mlmodel = MLModel(path)
        except TypeError as e:
            raise KrakenInvalidModelException(str(e))
        except DecodeError as e:
            raise KrakenInvalidModelException('Failure parsing model protobuf: {}'.format(str(e)))
        # The VGSL spec string is mandatory; it defines the network topology.
        if 'vgsl' not in mlmodel.user_defined_metadata:
            raise KrakenInvalidModelException('No VGSL spec in model metadata')
        vgsl_spec = mlmodel.user_defined_metadata['vgsl']
        nn = cls(vgsl_spec)
        # Rehydrate each layer's weights from the model spec.
        for name, layer in nn.nn.named_children():
            layer.deserialize(name, mlmodel.get_spec())

        # Optional serialized output codec.
        if 'codec' in mlmodel.user_defined_metadata:
            nn.add_codec(PytorchCodec(json.loads(mlmodel.user_defined_metadata['codec'])))

        # NOTE(review): the defaults hold mixed value types (list, str,
        # None, dict), so the `dict[str, str]` type comment is imprecise.
        nn.user_metadata = {'accuracy': [], 'seg_type': 'bbox', 'one_channel_mode': '1', 'model_type': None, 'hyper_params': {}}  # type: dict[str, str]
        if 'kraken_meta' in mlmodel.user_defined_metadata:
            nn.user_metadata.update(json.loads(mlmodel.user_defined_metadata['kraken_meta']))
        return nn
示例#10
0
    def test_nn_set_training_input(self):
        """An MSE loss declared with input_feature should surface both the
        network input and the auto-created target in trainingInput."""
        nn_builder = self.create_base_builder()

        nn_builder.set_mean_squared_error_loss(
            name="mse",
            input_feature=("output", datatypes.Array(3)))

        adam_params = AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999,
                                 eps=1e-8)
        nn_builder.set_adam_optimizer(adam_params)
        nn_builder.set_epochs(20, allowed_set=[10, 20, 30])

        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(nn_builder.spec, model_path)

        loaded = MLModel(model_path)
        self.assertTrue(loaded is not None)
        loaded_spec = loaded.get_spec()
        expected = [("input", "multiArrayType"),
                    ("output_true", "multiArrayType")]
        for idx, (feat_name, feat_type) in enumerate(expected):
            self.assertEqual(
                loaded_spec.description.trainingInput[idx].name, feat_name)
            self.assertEqual(
                loaded_spec.description.trainingInput[idx].type.WhichOneof("Type"),
                feat_type)
示例#11
0
    def test_nn_set_training_input(self):
        """Explicitly declared training inputs should round-trip through the
        saved spec with the right types."""
        nn_builder = self.create_base_builder()

        nn_builder.set_mean_squared_error_loss(name='mse',
                                               input='output',
                                               target='target')

        adam_params = AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999,
                                 eps=1e-8)
        nn_builder.set_adam_optimizer(adam_params)
        nn_builder.set_epochs(20, allowed_set=[10, 20, 30])

        nn_builder.set_training_input([('input', datatypes.Array(3)),
                                       ('target', 'Double')])

        model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
        print(model_path)
        save_spec(nn_builder.spec, model_path)

        loaded = MLModel(model_path)
        self.assertTrue(loaded is not None)
        loaded_spec = loaded.get_spec()
        expected = [('input', 'multiArrayType'), ('target', 'doubleType')]
        for idx, (feat_name, feat_type) in enumerate(expected):
            self.assertEqual(
                loaded_spec.description.trainingInput[idx].name, feat_name)
            self.assertEqual(
                loaded_spec.description.trainingInput[idx].type.WhichOneof('Type'),
                feat_type)
示例#12
0
    def test_model_api(self):
        """Exercise MLModel metadata accessors and verify the values survive
        a save/load round trip through an .mlpackage.

        Fixes: the post-load assertions now check ``loaded_model`` (the
        original re-checked ``model``, leaving ``loaded_model`` untested).
        """
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author,
                         "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license,
                         "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            "Test model")

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString,
                         "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        # Reserve a unique .mlpackage path, then remove it so save() can
        # create it fresh.
        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        # Verify the metadata on the model loaded back from disk.
        self.assertEqual(loaded_model.author, "Test author")
        self.assertEqual(loaded_model.license, "Test license")
        self.assertEqual(loaded_model.short_description, "Test model")
        self.assertEqual(loaded_model.input_description["feature_1"],
                         "This is feature 1")
        self.assertEqual(loaded_model.output_description["output"], "This is output")

        # cleanup
        MLModelTest._remove_path(package.name)
示例#13
0
    def test_nn_builder_with_training_features(self):
        """Build an updatable two-layer network with explicit training
        features and verify they appear in the saved spec."""
        input_features = [('input', datatypes.Array(3))]
        output_features = [('output', None)]
        training_features = [('input', datatypes.Array(3)),
                             ('target', datatypes.Double)]

        builder = NeuralNetworkBuilder(input_features,
                                       output_features,
                                       disable_rank5_shape_mapping=True,
                                       training_features=training_features)

        # Two stacked 3x3 bias-free inner-product layers: input -> hidden
        # -> output.
        W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
        W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(name='ip1',
                                  W=W1,
                                  b=None,
                                  input_channels=3,
                                  output_channels=3,
                                  has_bias=False,
                                  input_name='input',
                                  output_name='hidden')
        builder.add_inner_product(name='ip2',
                                  W=W2,
                                  b=None,
                                  input_channels=3,
                                  output_channels=3,
                                  has_bias=False,
                                  input_name='hidden',
                                  output_name='output')

        builder.make_updatable(['ip1', 'ip2'])  # or a dict for weightParams

        builder.set_mean_squared_error_loss(name='mse',
                                            input='output',
                                            target='target')

        builder.set_adam_optimizer(
            AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
        builder.set_epochs(20, allowed_set=[10, 20, 30])

        builder.set_training_input([('input', datatypes.Array(3)),
                                    ('target', 'Double')])

        model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
        print(model_path)
        save_spec(builder.spec, model_path)

        # Reload from disk and check the declared training inputs and types.
        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertEqual(spec.description.trainingInput[0].name, 'input')
        self.assertEqual(
            spec.description.trainingInput[0].type.WhichOneof('Type'),
            'multiArrayType')
        self.assertEqual(spec.description.trainingInput[1].name, 'target')
        self.assertEqual(
            spec.description.trainingInput[1].type.WhichOneof('Type'),
            'doubleType')
示例#14
0
 def test_path_to_mlmodel_to_spec_to_mlmodel(self):
     """Round-trip: disk -> MLModel -> spec -> MLModel, then verify the
     rebuilt model still behaves correctly."""
     disk_model = MLModel(self.mlpackage_path)
     rebuilt_model = MLModel(disk_model.get_spec(),
                             weights_dir=disk_model.weights_dir)
     self._test_mlmodel_correctness(rebuilt_model)
示例#15
0
def _convert_to_classifier(proto, classifier_config, skip_model_load=False):
    """Promote the model built from ``proto`` to a neural-network classifier
    using the labels and output names in ``classifier_config``."""
    base_model = MLModel(proto, skip_model_load=skip_model_load)
    classifier_model = neural_network.utils.make_nn_classifier(
        base_model,
        classifier_config.class_labels,
        classifier_config.predicted_feature_name,
        classifier_config.predicted_probabilities_output,
    )
    return classifier_model.get_spec()
示例#16
0
    def test_model_api(self):
        """Exercise MLModel metadata accessors and verify the values survive
        a save/load round trip through an .mlmodel file.

        Fixes: the post-load assertions now check ``loaded_model`` (the
        original re-checked ``model``, leaving ``loaded_model`` untested).
        """
        model = MLModel(self.spec)
        self.assertIsNotNone(model)

        model.author = "Test author"
        self.assertEqual(model.author, "Test author")
        self.assertEqual(model.get_spec().description.metadata.author,
                         "Test author")

        model.license = "Test license"
        self.assertEqual(model.license, "Test license")
        self.assertEqual(model.get_spec().description.metadata.license,
                         "Test license")

        model.short_description = "Test model"
        self.assertEqual(model.short_description, "Test model")
        self.assertEqual(
            model.get_spec().description.metadata.shortDescription,
            "Test model")

        model.version = "1.3"
        self.assertEqual(model.version, "1.3")
        self.assertEqual(model.get_spec().description.metadata.versionString,
                         "1.3")

        model.input_description["feature_1"] = "This is feature 1"
        self.assertEqual(model.input_description["feature_1"],
                         "This is feature 1")

        model.output_description["output"] = "This is output"
        self.assertEqual(model.output_description["output"], "This is output")

        filename = tempfile.mktemp(suffix=".mlmodel")
        model.save(filename)
        loaded_model = MLModel(filename)

        # Verify the metadata on the model loaded back from disk.
        self.assertEqual(loaded_model.author, "Test author")
        self.assertEqual(loaded_model.license, "Test license")
        # self.assertEqual(loaded_model.short_description, 'Test model')
        self.assertEqual(loaded_model.input_description["feature_1"],
                         "This is feature 1")
        self.assertEqual(loaded_model.output_description["output"], "This is output")
示例#17
0
    def test_model_api(self):
        """Exercise MLModel metadata accessors and verify the values survive
        a save/load round trip through an .mlpackage.

        Fixes: the post-load assertions now check ``loaded_model`` (the
        original re-checked ``model``, leaving ``loaded_model`` untested).
        """
        model = MLModel(self.spec)
        assert model is not None

        model.author = "Test author"
        assert model.author == "Test author"
        assert model.get_spec().description.metadata.author == "Test author"

        model.license = "Test license"
        assert model.license == "Test license"
        assert model.get_spec().description.metadata.license == "Test license"

        model.short_description = "Test model"
        assert model.short_description == "Test model"
        assert model.get_spec(
        ).description.metadata.shortDescription == "Test model"

        model.version = "1.3"
        assert model.version == "1.3"
        assert model.get_spec().description.metadata.versionString == "1.3"

        model.input_description["feature_1"] = "This is feature 1"
        assert model.input_description["feature_1"] == "This is feature 1"

        model.output_description["output"] = "This is output"
        assert model.output_description["output"] == "This is output"

        # Reserve a unique .mlpackage path, then remove it so save() can
        # create it fresh.
        package = tempfile.TemporaryDirectory(suffix=".mlpackage")
        package.cleanup()

        model.save(package.name)
        loaded_model = MLModel(package.name)

        # Verify the metadata on the model loaded back from disk.
        assert loaded_model.author == "Test author"
        assert loaded_model.license == "Test license"
        assert loaded_model.short_description == "Test model"
        assert loaded_model.input_description["feature_1"] == "This is feature 1"
        assert loaded_model.output_description["output"] == "This is output"

        # cleanup
        _remove_path(package.name)
示例#18
0
    def test_nn_builder_with_training_features(self):
        """Build an updatable two-layer network with an MSE loss declared
        via input_feature, and verify the auto-created training inputs."""
        input_features = [("input", datatypes.Array(3))]
        output_features = [("output", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(input_features, output_features)

        # Two stacked 3x3 bias-free inner-product layers: input -> hidden
        # -> output.
        W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
        W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(
            name="ip1",
            W=W1,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="input",
            output_name="hidden",
        )
        builder.add_inner_product(
            name="ip2",
            W=W2,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="hidden",
            output_name="output",
        )

        builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams

        builder.set_mean_squared_error_loss(name="mse",
                                            input_feature=("output",
                                                           datatypes.Array(3)))

        builder.set_adam_optimizer(
            AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
        builder.set_epochs(20, allowed_set=[10, 20, 30])

        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)

        # Reload from disk; the second training input ("output_true") is the
        # loss target derived from the MSE input_feature above.
        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertEqual(spec.description.trainingInput[0].name, "input")
        self.assertEqual(
            spec.description.trainingInput[0].type.WhichOneof("Type"),
            "multiArrayType")
        self.assertEqual(spec.description.trainingInput[1].name, "output_true")
        self.assertEqual(
            spec.description.trainingInput[1].type.WhichOneof("Type"),
            "multiArrayType")
示例#19
0
    def test_downgrade_specification_version(self):
        """Spec versions are normalized or downgraded where appropriate."""
        # A negative (invalid) specification version is normalized to 1.
        self.spec.specificationVersion = -1
        model = MLModel(self.spec)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # An unnecessarily high version is downgraded on save when
        # auto_set_specification_version is enabled.
        self.spec.specificationVersion = 4
        path = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, path, auto_set_specification_version=True)
        model = MLModel(path)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # A network built from spec-1 layers only is downgraded to 1 even
        # when the builder claims version 3.
        feats_in = [("data", datatypes.Array(3))]
        feats_out = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(feats_in, feats_out)
        builder.add_activation("relu", "RELU", "data", "out")
        builder.spec.specificationVersion = 3
        model = MLModel(builder.spec)
        path = tempfile.mktemp(suffix=".mlmodel")
        model.save(path)
        model = MLModel(path)
        if model.get_spec().specificationVersion != 1:
            raise AssertionError

        # With the automatic downgrade disabled, the version is preserved.
        self.spec.specificationVersion = 3
        path = tempfile.mktemp(suffix=".mlmodel")
        save_spec(self.spec, path, auto_set_specification_version=False)
        model = MLModel(path)
        if model.get_spec().specificationVersion != 3:
            raise AssertionError
示例#20
0
    def inference(cls, architecture, model_path, image_path):
        """Run a Core ML model on an image file and return the squeezed
        probability array, or None for an unknown architecture."""
        # TODO
        from PIL import Image
        import numpy as np
        from coremltools.models._infer_shapes_nn_mlmodel import infer_shapes
        if cls.sanity_check(architecture):
            func = TestKit.preprocess_func['coreml'][architecture]

            # HACK: recover the target image size by parsing the source code
            # of the preprocessing function, expecting the size to be the
            # literal right after 'path,' in its call signature.
            # NOTE(review): brittle — breaks if the function source is
            # formatted differently; verify against TestKit.preprocess_func.
            import inspect
            funcstr = inspect.getsource(func)

            if len(funcstr.split(',')) == 3:
                size = int(funcstr.split('path,')[1].split(')')[0])
            else:
                size = int(funcstr.split('path,')[1].split(',')[0])

            img = Image.open(image_path)
            img = img.resize((size, size))

            # load model
            model = MLModel(model_path)
            spec = model.get_spec()

            # TODO: Multiple inputs
            input_name = spec.description.input[0].name

            # TODO: Multiple outputs
            output_name = spec.description.output[0].name

            # inference
            input_data = img
            coreml_input = {input_name: img}
            coreml_output = model.predict(coreml_input)

            # Classifier outputs are dicts keyed by label; plain network
            # outputs are already array-like.
            prob = coreml_output[output_name]
            if isinstance(prob, dict):
                prob = coreml_output[output_name].values()
            prob = np.array(prob).squeeze()

            return prob

        else:
            return None
示例#21
0
 def test_nn_classifier_util(self):
     """Promote a one-layer linear network to a classifier using an
     in-memory label list and verify the predicted label."""
     input_features = [("data", datatypes.Array(3))]
     output_features = [("out", datatypes.Array(3))]
     builder = NeuralNetworkBuilder(input_features,
                                    output_features,
                                    disable_rank5_shape_mapping=True)
     builder.add_activation("linear", "LINEAR", "data", "out")
     spec = builder.spec
     mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY)
     mlmodel = make_nn_classifier(
         mlmodel,
         class_labels=["a", "b", "c"],
         predicted_feature_name="out_confidence",
         predicted_probabilities_output="out",
     )
     # The largest activation (6.0 at index 2) should map to label "c".
     out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])})
     self.assertEqual(out_dict["out_confidence"], "c")
     self.assertEqual(mlmodel.get_spec().WhichOneof("Type"),
                      "neuralNetworkClassifier")
示例#22
0
    def load_model(cls, path: str):
        """
        Deserializes a VGSL model from a CoreML file.

        Args:
            path (str): CoreML file
        """
        mlmodel = MLModel(path)
        metadata = mlmodel.user_defined_metadata
        # The VGSL spec string is mandatory; it defines the topology.
        if 'vgsl' not in metadata:
            raise ValueError('No VGSL spec in model metadata')
        nn = cls(metadata['vgsl'])
        # Rehydrate each child layer's weights from the model spec.
        for child_name, child in nn.nn.named_children():
            child.deserialize(child_name, mlmodel.get_spec())

        # Optional serialized output codec.
        if 'codec' in metadata:
            codec = PytorchCodec(json.loads(metadata['codec']))
            nn.add_codec(codec)
        return nn
示例#23
0
    def load_model(cls, path: str):
        """
        Deserializes a VGSL model from a CoreML file.

        Args:
            path (str): CoreML file
        """
        mlmodel = MLModel(path)
        # The VGSL spec string is mandatory; it defines the topology.
        if 'vgsl' not in mlmodel.user_defined_metadata:
            raise ValueError('No VGSL spec in model metadata')
        vgsl_spec = mlmodel.user_defined_metadata['vgsl']
        nn = cls(vgsl_spec)
        # Rehydrate each layer's weights from the model spec.
        for name, layer in nn.nn.named_children():
            layer.deserialize(name, mlmodel.get_spec())

        # Optional serialized codec and extra kraken metadata blobs.
        if 'codec' in mlmodel.user_defined_metadata:
            nn.add_codec(PytorchCodec(json.loads(mlmodel.user_defined_metadata['codec'])))
        if 'kraken_meta' in mlmodel.user_defined_metadata:
            nn.user_metadata = json.loads(mlmodel.user_defined_metadata['kraken_meta'])
        return nn
示例#24
0
    def test_rename_output_nn_classifier(self):
        """Renaming the classifier's probability output should carry through
        to the prediction result keys.

        Fixes: the original defined ``class_labels`` and then ignored it,
        re-spelling the literal in the make_nn_classifier call.
        """
        input_features = [("data", datatypes.Array(3))]
        output_features = [("out", datatypes.Array(3))]
        builder = NeuralNetworkBuilder(
            input_features, output_features, disable_rank5_shape_mapping=True
        )
        builder.add_activation("linear", "LINEAR", "data", "out")
        spec = builder.spec
        mlmodel = MLModel(spec)

        class_labels = ["a", "b", "c"]
        mlmodel = make_nn_classifier(mlmodel, class_labels=class_labels)

        # rename the probabilities output and rebuild the model
        spec = mlmodel.get_spec()
        rename_feature(spec, "out", "new_out_name")
        mlmodel = MLModel(spec)

        # The largest activation (6.0 at index 2) should map to label "c",
        # and the renamed probabilities output should appear as a dict.
        out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
        self.assertEqual(out_dict["classLabel"], "c")
        self.assertTrue("new_out_name" in out_dict)
        self.assertTrue(isinstance(out_dict["new_out_name"], dict))
示例#25
0
    def test_updatable_model_creation_mse_sgd(self):
        """Create an updatable model with an MSE loss and SGD optimizer and
        verify the updatable flags and optimizer parameters in the spec."""
        builder = self.create_base_builder()

        builder.set_mean_squared_error_loss(name='mse',
                                            input='output',
                                            target='target')

        builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))

        builder.set_epochs(20)

        model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
        print(model_path)
        save_spec(builder.spec, model_path)

        # Reload from disk and inspect the serialized update parameters.
        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
        self.assertTrue(
            spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
        self.assertTrue(
            spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)

        # NOTE(review): this checks categoricalCrossEntropyLossLayer in an
        # MSE test, and protobuf submessage access never yields None, so the
        # assertion is vacuous — likely meant to use HasField / the
        # meanSquaredErrorLossLayer field. Confirm intent before changing.
        self.assertTrue(spec.neuralNetwork.updateParams.lossLayers[0].
                        categoricalCrossEntropyLossLayer is not None)
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None)

        self.assertTrue(
            _np.isclose(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        learningRate.defaultValue,
                        1e-2,
                        atol=1e-4))
        self.assertTrue(
            _np.isclose(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        miniBatchSize.defaultValue,
                        10,
                        atol=1e-4))
        self.assertTrue(
            _np.isclose(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        momentum.defaultValue,
                        0,
                        atol=1e-8))
        self.assertTrue(
            _np.isclose(spec.neuralNetwork.updateParams.epochs.defaultValue,
                        20,
                        atol=1e-4))

        # Serialized allowed ranges/sets for the hyperparameters.
        self.assertTrue(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        learningRate.range.minValue == 0)
        self.assertTrue(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        learningRate.range.maxValue == 1)

        self.assertTrue(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        miniBatchSize.set.values == [10])

        self.assertTrue(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        momentum.range.minValue == 0)
        self.assertTrue(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.
                        momentum.range.maxValue == 1)
示例#26
0
from coremltools.models import MLModel

# Load a compiled Core ML model from disk and dump its protobuf spec to stdout.
model = MLModel("openface.mlmodel")
print(model.get_spec())
示例#27
0
    def test_updatable_model_creation_ce_sgd(self):
        """Build an updatable model (softmax + categorical cross-entropy loss,
        SGD optimizer), save it, reload it, and verify that every training
        parameter survives the round trip.

        Fixes vs. the original:
        * The loss-layer / optimizer checks now use protobuf ``HasField`` —
          accessing a protobuf message field never yields ``None``, so the
          previous ``... is not None`` assertions were vacuous.
        * ``assertEqual`` / ``assertIsNotNone`` / ``assertAlmostEqual`` give
          useful failure messages.
        * The ``epochs`` allowed set is configured below but was never
          asserted; it is now.
        """
        builder = self.create_base_builder()

        builder.add_softmax(name="softmax",
                            input_name="output",
                            output_name="softmax_output")

        builder.set_categorical_cross_entropy_loss(name="cross_entropy",
                                                   input="softmax_output")

        builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))
        builder.set_epochs(20, allowed_set=[10, 20, 30, 40])

        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)

        mlmodel = MLModel(model_path)
        self.assertIsNotNone(mlmodel)

        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)

        # Both inner-product layers, and their weights, must be marked updatable.
        for layer in spec.neuralNetwork.layers[:2]:
            self.assertTrue(layer.isUpdatable)
            self.assertTrue(layer.innerProduct.weights.isUpdatable)

        update_params = spec.neuralNetwork.updateParams
        # The oneof fields must carry exactly the variants we configured.
        self.assertTrue(update_params.lossLayers[0].HasField(
            'categoricalCrossEntropyLossLayer'))
        self.assertTrue(update_params.optimizer.HasField('sgdOptimizer'))

        sgd = update_params.optimizer.sgdOptimizer
        self.assertAlmostEqual(sgd.learningRate.defaultValue, 1e-2, delta=1e-4)
        self.assertAlmostEqual(sgd.miniBatchSize.defaultValue, 10, delta=1e-4)
        self.assertAlmostEqual(sgd.momentum.defaultValue, 0, delta=1e-8)
        self.assertAlmostEqual(update_params.epochs.defaultValue, 20,
                               delta=1e-4)

        # Allowed ranges / sets written by set_sgd_optimizer / set_epochs.
        self.assertEqual(sgd.learningRate.range.minValue, 0)
        self.assertEqual(sgd.learningRate.range.maxValue, 1)
        self.assertEqual(sgd.miniBatchSize.set.values, [10])
        self.assertEqual(sgd.momentum.range.minValue, 0)
        self.assertEqual(sgd.momentum.range.maxValue, 1)
        self.assertEqual(update_params.epochs.set.values, [10, 20, 30, 40])
示例#28
0
class VGGishFeatureExtractor(object):
    """Audio deep-feature extractor built around the VGGish model.

    The inference backend is chosen at construction time: on macOS 10.14+
    the Core ML port of VGGish is used; otherwise the TensorFlow/Keras model
    is loaded (wrapped in a TensorFlow GPU policy).
    """
    # Human-readable backend name.
    name = 'VGGish'
    # Length of the flattened deep-feature vector produced per frame.
    output_length = 12288
    # Expected sample rate of incoming audio (defined at module level).
    input_sample_rate = SAMPLE_RATE

    @staticmethod
    def _preprocess_data(audio_data, verbose=True):
        '''
        Preprocess each example, breaking it up into frames.

        Returns two numpy arrays: preprocessed frames and their row indexes.
        '''
        from .vggish_input import waveform_to_examples

        last_progress_update = _time.time()
        progress_header_printed = False

        # Can't run as a ".apply(...)" due to numba.jit decorator issue:
        # https://github.com/apple/turicreate/issues/1216
        preprocessed_data, audio_data_index = [], []
        for i, audio_dict in enumerate(audio_data):
            # Scale 16-bit PCM samples into [-1.0, 1.0].
            scaled_data = audio_dict['data'] / 32768.0
            data = waveform_to_examples(scaled_data, audio_dict['sample_rate'])

            # One output row per frame, each tagged with its source row index.
            for j in data:
                preprocessed_data.append([j])
                audio_data_index.append(i)

            # If `verbose` is set, print a progress update about every 20s
            if verbose and _time.time() - last_progress_update >= 20:
                if not progress_header_printed:
                    print("Preprocessing audio data -")
                    progress_header_printed = True
                print("Preprocessed {} of {} examples".format(
                    i, len(audio_data)))
                last_progress_update = _time.time()

        if progress_header_printed:
            print("Preprocessed {} of {} examples\n".format(
                len(audio_data), len(audio_data)))
        return _np.asarray(preprocessed_data), audio_data_index

    def __init__(self):
        """Locate the VGGish model file and load the platform backend."""
        vggish_model_file = VGGish()
        self.mac_ver = _mac_ver()

        if self.mac_ver < (10, 14):
            # Use TensorFlow/Keras
            import turicreate.toolkits._tf_utils as _utils
            self.gpu_policy = _utils.TensorFlowGPUPolicy()
            self.gpu_policy.start()

            model_path = vggish_model_file.get_model_path(format='tensorflow')
            self.vggish_model = _keras.models.load_model(model_path)
        else:
            # Use Core ML
            model_path = vggish_model_file.get_model_path(format='coreml')
            self.vggish_model = MLModel(model_path)

    def __del__(self):
        # Robustness fix: if __init__ raised before `mac_ver`/`gpu_policy`
        # were assigned, __del__ still runs and previously raised a secondary
        # AttributeError. Behavior is unchanged for fully-constructed objects
        # (the (0,) default still compares < (10, 14), matching the original
        # branch for the TensorFlow path and for non-mac platforms).
        if getattr(self, 'mac_ver', (0,)) < (10, 14):
            gpu_policy = getattr(self, 'gpu_policy', None)
            if gpu_policy is not None:
                gpu_policy.stop()

    def _extract_features(self, preprocessed_data, verbose=True):
        """
        Run VGGish on preprocessed frames.

        Parameters
        ----------
        preprocessed_data : SArray

        Returns
        -------
        numpy array containing the deep features
        """
        last_progress_update = _time.time()
        progress_header_printed = False

        deep_features = _tc.SArrayBuilder(_np.ndarray)

        if _mac_ver() < (10, 14):
            # Use TensorFlow/Keras

            # Transpose data from channel first to channel last
            preprocessed_data = _np.transpose(preprocessed_data, (0, 2, 3, 1))

            for i, cur_example in enumerate(preprocessed_data):
                y = self.vggish_model.predict([[cur_example]])
                deep_features.append(y[0])

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {}".format(i,
                                                      len(preprocessed_data)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {}\n".format(len(preprocessed_data),
                                                    len(preprocessed_data)))

        else:
            # Use Core ML

            for i, cur_example in enumerate(preprocessed_data):
                for cur_frame in cur_example:
                    x = {'input1': _np.asarray([cur_frame])}
                    y = self.vggish_model.predict(x)
                    deep_features.append(y['output1'])

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {}".format(i,
                                                      len(preprocessed_data)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {}\n".format(len(preprocessed_data),
                                                    len(preprocessed_data)))

        return deep_features.close()

    def get_deep_features(self, audio_data, verbose):
        '''
        Performs both audio preprocessing and VGGish deep feature extraction.
        '''
        preprocessed_data, row_ids = self._preprocess_data(audio_data, verbose)
        deep_features = self._extract_features(preprocessed_data, verbose)

        output = _tc.SFrame({
            'deep features': deep_features,
            'row id': row_ids
        })
        output = output.unstack('deep features')

        # Rows whose audio produced no frames vanish during unstack; add them
        # back with empty feature lists so output aligns with the input rows.
        max_row_id = len(audio_data)
        missing_ids = set(range(max_row_id)) - set(output['row id'].unique())
        if len(missing_ids) != 0:
            empty_rows = _tc.SFrame({
                'List of deep features': [[] for _ in range(len(missing_ids))],
                'row id':
                missing_ids
            })
            output = output.append(empty_rows)

        output = output.sort('row id')
        return output['List of deep features']

    def get_spec(self):
        """
        Return the Core ML spec
        """
        if _mac_ver() >= (10, 14):
            return self.vggish_model.get_spec()
        else:
            # The loaded backend is Keras, so fetch the Core ML model file
            # separately just to read its spec.
            vggish_model_file = VGGish()
            coreml_model_path = vggish_model_file.get_model_path(
                format='coreml')
            return MLModel(coreml_model_path).get_spec()
示例#29
0
    def test_updatable_model_creation_mse_adam(self):
        """Build an updatable model (mean-squared-error loss, Adam optimizer),
        save it, reload it, and verify the training parameters in the spec.

        Fixes vs. the original:
        * The loss-layer check now verifies ``meanSquaredErrorLossLayer`` —
          the original asserted ``categoricalCrossEntropyLossLayer is not
          None``, which is the wrong field for an MSE model and vacuous
          anyway (accessing a protobuf message field never yields ``None``).
        * ``HasField`` / ``assertEqual`` / ``assertAlmostEqual`` idioms for
          real checks and clearer failure messages.
        """
        builder = self.create_base_builder()

        builder.set_mean_squared_error_loss(name="mse",
                                            input_feature=("output",
                                                           datatypes.Array(3)))

        builder.set_adam_optimizer(
            AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
        builder.set_epochs(20, allowed_set=[10, 20, 30])

        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)

        mlmodel = MLModel(model_path)
        self.assertIsNotNone(mlmodel)
        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)

        # Both inner-product layers, and their weights, must be marked updatable.
        for layer in spec.neuralNetwork.layers[:2]:
            self.assertTrue(layer.isUpdatable)
            self.assertTrue(layer.innerProduct.weights.isUpdatable)

        update_params = spec.neuralNetwork.updateParams
        # The oneof fields must carry exactly the variants we configured.
        self.assertTrue(
            update_params.lossLayers[0].HasField('meanSquaredErrorLossLayer'))
        self.assertTrue(update_params.optimizer.HasField('adamOptimizer'))

        adam = update_params.optimizer.adamOptimizer
        self.assertAlmostEqual(adam.learningRate.defaultValue, 1e-2,
                               delta=1e-4)
        self.assertAlmostEqual(adam.miniBatchSize.defaultValue, 10, delta=1e-4)
        self.assertAlmostEqual(adam.beta1.defaultValue, 0.9, delta=1e-4)
        self.assertAlmostEqual(adam.beta2.defaultValue, 0.999, delta=1e-4)
        self.assertAlmostEqual(adam.eps.defaultValue, 1e-8, delta=1e-8)
        self.assertAlmostEqual(update_params.epochs.defaultValue, 20,
                               delta=1e-4)

        # Allowed ranges / sets written by set_adam_optimizer / set_epochs.
        self.assertEqual(adam.learningRate.range.minValue, 0)
        self.assertEqual(adam.learningRate.range.maxValue, 1)
        self.assertEqual(adam.miniBatchSize.set.values, [10])
        self.assertEqual(adam.beta1.range.minValue, 0)
        self.assertEqual(adam.beta1.range.maxValue, 1)
        self.assertEqual(adam.beta2.range.minValue, 0)
        self.assertEqual(adam.beta2.range.maxValue, 1)
        self.assertEqual(adam.eps.range.minValue, 0)
        self.assertEqual(adam.eps.range.maxValue, 1)
        self.assertEqual(update_params.epochs.set.values, [10, 20, 30])
class VGGishFeatureExtractor(object):
    """Audio deep-feature extractor built around the VGGish model.

    The inference backend is chosen at construction time: for
    ``_mac_ver() >= (10, 14)`` the Core ML port of VGGish is used; otherwise
    the network is rebuilt in MXNet Gluon and the pretrained parameters are
    loaded from disk.
    """
    # Human-readable backend name.
    name = 'VGGish'
    # Length of the flattened deep-feature vector produced per frame.
    output_length = 12288

    @staticmethod
    def _preprocess_data(audio_data, verbose=True):
        '''
        Preprocess each example, breaking it up into frames.

        Returns two numpy arrays: preprocessed frame and their indexes
        '''
        from .vggish_input import waveform_to_examples

        last_progress_update = _time.time()
        progress_header_printed = False

        # Can't run as a ".apply(...)" due to numba.jit decorator issue:
        # https://github.com/apple/turicreate/issues/1216
        preprocessed_data, audio_data_index = [], []
        for i, audio_dict in enumerate(audio_data):
            # Scale 16-bit PCM samples into [-1.0, 1.0].
            scaled_data = audio_dict['data'] / 32768.0
            data = waveform_to_examples(scaled_data, audio_dict['sample_rate'])

            # One output row per frame, each tagged with its source row index.
            for j in data:
                preprocessed_data.append([j])
                audio_data_index.append(i)

            # If `verbose` is set, print a progress update about every 20s
            if verbose and _time.time() - last_progress_update >= 20:
                if not progress_header_printed:
                    print("Preprocessing audio data -")
                    progress_header_printed = True
                print("Preprocessed {} of {} examples".format(
                    i, len(audio_data)))
                last_progress_update = _time.time()

        if progress_header_printed:
            print("Preprocessed {} of {} examples\n".format(
                len(audio_data), len(audio_data)))
        return _np.asarray(preprocessed_data), audio_data_index

    @staticmethod
    def _build_net():
        """Build the VGGish convolutional feature network in MXNet Gluon.

        Layer channel progression is 1 -> 64 -> 128 -> 256 -> 256 -> 512 ->
        512, each conv followed by ReLU, with max-pooling after selected
        stages and a channel-last flatten at the end. The ``prefix`` values
        must match the parameter names stored in the pretrained model file.
        """
        net = nn.HybridSequential()
        net.add(
            nn.Conv2D(channels=64,
                      kernel_size=(3, 3),
                      in_channels=1,
                      padding=(1, 1),
                      prefix='vggish_conv0_'))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D())
        net.add(
            nn.Conv2D(channels=128,
                      kernel_size=(3, 3),
                      in_channels=64,
                      padding=(1, 1),
                      prefix='vggish_conv1_'))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D())
        net.add(
            nn.Conv2D(channels=256,
                      kernel_size=(3, 3),
                      in_channels=128,
                      padding=(1, 1),
                      prefix='vggish_conv2_'))
        net.add(nn.Activation('relu'))
        net.add(
            nn.Conv2D(channels=256,
                      kernel_size=(3, 3),
                      in_channels=256,
                      padding=(1, 1),
                      prefix='vggish_conv3_'))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D())
        net.add(
            nn.Conv2D(channels=512,
                      kernel_size=(3, 3),
                      in_channels=256,
                      padding=(1, 1),
                      prefix='vggish_conv4_'))
        net.add(nn.Activation('relu'))
        net.add(
            nn.Conv2D(channels=512,
                      kernel_size=(3, 3),
                      in_channels=512,
                      padding=(1, 1),
                      prefix='vggish_conv5_'))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D())
        net.add(Flatten_channel_last())
        return net

    def __init__(self):
        """Locate the VGGish model file and load the platform backend."""
        vggish_model_file = VGGish()

        if _mac_ver() < (10, 14):
            # Use MXNet
            model_path = vggish_model_file.get_model_path(format='mxnet')
            self.vggish_model = VGGishFeatureExtractor._build_net()
            net_params = self.vggish_model.collect_params()
            self.ctx = _mxnet_utils.get_mxnet_context()
            net_params.load(model_path, ctx=self.ctx)
        else:
            # Use Core ML
            model_path = vggish_model_file.get_model_path(format='coreml')
            self.vggish_model = MLModel(model_path)

    def _extract_features(self, preprocessed_data, verbose=True):
        """
        Run VGGish on preprocessed frames.

        Parameters
        ----------
        preprocessed_data : SArray

        Returns
        -------
        numpy array containing the deep features
        """
        last_progress_update = _time.time()
        progress_header_printed = False

        deep_features = _tc.SArrayBuilder(_np.ndarray)

        if _mac_ver() < (10, 14):
            # Use MXNet
            preprocessed_data = mx.nd.array(preprocessed_data)

            # split_and_load needs at least one example per context, so trim
            # the context list when there are fewer examples than devices.
            ctx_list = self.ctx
            if len(preprocessed_data) < len(ctx_list):
                ctx_list = ctx_list[:len(preprocessed_data)]
            batches = utils.split_and_load(preprocessed_data,
                                           ctx_list=ctx_list,
                                           even_split=False)

            for i, cur_batch in enumerate(batches):
                y = self.vggish_model.forward(cur_batch).asnumpy()
                for j in y:
                    deep_features.append(j)

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {} batches".format(i, len(batches)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {} batches\n".format(
                    len(batches), len(batches)))

        else:
            # Use Core ML
            for i, cur_example in enumerate(preprocessed_data):
                for cur_frame in cur_example:
                    # 'input1' / 'output1' are the Core ML model's feature
                    # names; each frame is predicted individually.
                    x = {'input1': [cur_frame]}
                    y = self.vggish_model.predict(x)
                    deep_features.append(y['output1'])

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {}".format(i,
                                                      len(preprocessed_data)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {}\n".format(len(preprocessed_data),
                                                    len(preprocessed_data)))

        return deep_features.close()

    def get_deep_features(self, audio_data, verbose):
        '''
        Performs both audio preprocessing and VGGish deep feature extraction.
        '''
        preprocessed_data, row_ids = self._preprocess_data(audio_data, verbose)
        deep_features = self._extract_features(preprocessed_data, verbose)

        # Group the per-frame features back into one list per input row.
        output = _tc.SFrame({
            'deep features': deep_features,
            'row id': row_ids
        })
        output = output.unstack('deep features')
        output = output.sort('row id')
        return output['List of deep features']

    def get_spec(self):
        """
        Return the Core ML spec
        """
        if _mac_ver() >= (10, 14):
            return self.vggish_model.get_spec()
        else:
            # The loaded backend is MXNet, so fetch the Core ML model file
            # separately just to read its spec.
            vggish_model_file = VGGish()
            coreml_model_path = vggish_model_file.get_model_path(
                format='coreml')
            return MLModel(coreml_model_path).get_spec()
示例#31
0
    def test_updatable_model_creation_ce_adam(self):
        """Build an updatable model (softmax + categorical cross-entropy loss
        with an explicit target, default Adam optimizer with a custom batch
        set), save/reload it, and verify the training parameters in the spec.

        Fixes vs. the original:
        * ``HasField`` replaces the vacuous ``... is not None`` protobuf
          checks (accessing a protobuf message field never yields ``None``).
        * ``assertEqual`` / ``assertIsNotNone`` / ``assertAlmostEqual`` give
          useful failure messages.
        """
        builder = self.create_base_builder()

        builder.add_softmax(name='softmax',
                            input_name='output',
                            output_name='softmax_output')

        builder.set_categorical_cross_entropy_loss(name='cross_entropy',
                                                   input='softmax_output',
                                                   target='target')

        adam_params = AdamParams()
        adam_params.set_batch(value=10, allowed_set=[10, 20])
        builder.set_adam_optimizer(adam_params)
        builder.set_epochs(20)

        model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
        print(model_path)
        save_spec(builder.spec, model_path)

        mlmodel = MLModel(model_path)
        self.assertIsNotNone(mlmodel)
        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)

        # Both inner-product layers, and their weights, must be marked updatable.
        for layer in spec.neuralNetwork.layers[:2]:
            self.assertTrue(layer.isUpdatable)
            self.assertTrue(layer.innerProduct.weights.isUpdatable)

        update_params = spec.neuralNetwork.updateParams
        # The oneof fields must carry exactly the variants we configured.
        self.assertTrue(update_params.lossLayers[0].HasField(
            'categoricalCrossEntropyLossLayer'))
        self.assertTrue(update_params.optimizer.HasField('adamOptimizer'))

        # Defaults come from AdamParams(); only the batch set was customized.
        adam = update_params.optimizer.adamOptimizer
        self.assertAlmostEqual(adam.learningRate.defaultValue, 1e-2,
                               delta=1e-4)
        self.assertAlmostEqual(adam.miniBatchSize.defaultValue, 10, delta=1e-4)
        self.assertAlmostEqual(adam.beta1.defaultValue, 0.9, delta=1e-4)
        self.assertAlmostEqual(adam.beta2.defaultValue, 0.999, delta=1e-4)
        self.assertAlmostEqual(adam.eps.defaultValue, 1e-8, delta=1e-8)
        self.assertAlmostEqual(update_params.epochs.defaultValue, 20,
                               delta=1e-4)

        # Allowed ranges / sets written by set_adam_optimizer / set_epochs.
        self.assertEqual(adam.learningRate.range.minValue, 0)
        self.assertEqual(adam.learningRate.range.maxValue, 1)
        self.assertEqual(adam.miniBatchSize.set.values, [10, 20])
        self.assertEqual(adam.beta1.range.minValue, 0)
        self.assertEqual(adam.beta1.range.maxValue, 1)
        self.assertEqual(adam.beta2.range.minValue, 0)
        self.assertEqual(adam.beta2.range.maxValue, 1)
        self.assertEqual(adam.eps.range.minValue, 0)
        self.assertEqual(adam.eps.range.maxValue, 1)
        self.assertEqual(update_params.epochs.set.values, [20])