def create_base_builder(self, is_updatable=True):
    """Build a small two-layer (3 -> 3 -> 3) inner-product network for tests.

    Parameters
    ----------
    is_updatable: bool
        When True (the default, preserving previous behavior) the two
        inner-product layers are marked updatable. The fp16-quantization
        tests call this with ``is_updatable=False`` to obtain a plain,
        non-updatable model; previously that keyword did not exist and
        those calls raised TypeError.

    Returns
    -------
    NeuralNetworkBuilder
        Builder holding layers ``ip1`` ("input" -> "hidden") and
        ``ip2`` ("hidden" -> "output").
    """
    self.input_features = [("input", datatypes.Array(3))]
    self.output_features = [("output", None)]
    self.output_names = ["output"]
    builder = NeuralNetworkBuilder(self.input_features, self.output_features)
    W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
    W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
    builder.add_inner_product(
        name="ip1",
        W=W1,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="input",
        output_name="hidden",
    )
    builder.add_inner_product(
        name="ip2",
        W=W2,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="hidden",
        output_name="output",
    )
    if is_updatable:
        builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams
    return builder
def create_base_builder(self, is_updatable=True):
    """Build a small two-layer (3 -> 3 -> 3) inner-product network for tests.

    Uses ``disable_rank5_shape_mapping=True`` so the builder works with the
    newer (non-rank-5) shape mapping.

    Parameters
    ----------
    is_updatable: bool
        When True (the default, preserving previous behavior) the two
        inner-product layers are marked updatable. The fp16-quantization
        tests call this with ``is_updatable=False``; previously that
        keyword did not exist and those calls raised TypeError.

    Returns
    -------
    NeuralNetworkBuilder
        Builder holding layers ``ip1`` ("input" -> "hidden") and
        ``ip2`` ("hidden" -> "output").
    """
    self.input_features = [('input', datatypes.Array(3))]
    self.output_features = [('output', None)]
    self.output_names = ["output"]
    builder = NeuralNetworkBuilder(
        self.input_features,
        self.output_features,
        disable_rank5_shape_mapping=True,
    )
    W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
    W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
    builder.add_inner_product(
        name='ip1',
        W=W1,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name='input',
        output_name='hidden',
    )
    builder.add_inner_product(
        name='ip2',
        W=W2,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name='hidden',
        output_name='output',
    )
    if is_updatable:
        builder.make_updatable(['ip1', 'ip2'])  # or a dict for weightParams
    return builder
def test_make_updatable_with_bilstm(self):
    """A classifier containing a bidirectional LSTM should still allow
    marking its final dense layer updatable."""
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense, LSTM
    from keras.layers.wrappers import Bidirectional
    from coremltools.converters import keras as keras_converter

    num_classes = 6

    # Assemble a tiny BiLSTM -> softmax classifier with random weights.
    keras_model = Sequential()
    keras_model.add(
        Bidirectional(LSTM(32, input_shape=(10, 32)), input_shape=(10, 32)))
    keras_model.add(Dense(num_classes, activation="softmax"))
    keras_model.set_weights(
        [np.random.rand(*w.shape) for w in keras_model.get_weights()])

    input_names = ["input"]
    output_names = ["zzzz"]
    class_labels = ["a", "b", "c", "d", "e", "f"]
    predicted_feature_name = "pf"

    mlmodel = keras_converter.convert(
        keras_model,
        input_names,
        output_names,
        class_labels=class_labels,
        predicted_feature_name=predicted_feature_name,
        predicted_probabilities_output=output_names[0],
    )

    spec = mlmodel.get_spec()
    builder = NeuralNetworkBuilder(spec=spec)
    # we could be able to make "dense_1" updatable without any issue
    builder.make_updatable([spec.neuralNetworkClassifier.layers[1].name])
def test_nn_builder_with_training_features(self):
    """Builder constructed with explicit training_features should round-trip
    through save/load and expose 'input' and 'target' as training inputs."""
    input_features = [('input', datatypes.Array(3))]
    output_features = [('output', None)]
    training_features = [('input', datatypes.Array(3)),
                         ('target', datatypes.Double)]

    builder = NeuralNetworkBuilder(
        input_features,
        output_features,
        disable_rank5_shape_mapping=True,
        training_features=training_features,
    )

    # Two bias-free 3x3 inner products chained input -> hidden -> output.
    weights_a = _np.random.uniform(-0.5, 0.5, (3, 3))
    weights_b = _np.random.uniform(-0.5, 0.5, (3, 3))
    builder.add_inner_product(
        name='ip1',
        W=weights_a,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name='input',
        output_name='hidden',
    )
    builder.add_inner_product(
        name='ip2',
        W=weights_b,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name='hidden',
        output_name='output',
    )
    builder.make_updatable(['ip1', 'ip2'])  # or a dict for weightParams

    # Attach loss, optimizer, epochs and the training-input description.
    builder.set_mean_squared_error_loss(
        name='mse', input='output', target='target')
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
    builder.set_epochs(20, allowed_set=[10, 20, 30])
    builder.set_training_input(
        [('input', datatypes.Array(3)), ('target', 'Double')])

    model_path = os.path.join(self.model_dir, 'updatable_creation.mlmodel')
    print(model_path)
    save_spec(builder.spec, model_path)

    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)

    spec = mlmodel.get_spec()
    self.assertEqual(spec.description.trainingInput[0].name, 'input')
    self.assertEqual(
        spec.description.trainingInput[0].type.WhichOneof('Type'),
        'multiArrayType')
    self.assertEqual(spec.description.trainingInput[1].name, 'target')
    self.assertEqual(
        spec.description.trainingInput[1].type.WhichOneof('Type'),
        'doubleType')
def test_nn_builder_with_training_features(self):
    """With an MSE loss attached to 'output', saving and reloading should
    expose 'input' and the auto-generated 'output_true' training inputs."""
    input_features = [("input", datatypes.Array(3))]
    output_features = [("output", datatypes.Array(3))]
    builder = NeuralNetworkBuilder(input_features, output_features)

    # Two bias-free 3x3 inner products chained input -> hidden -> output.
    weights_a = _np.random.uniform(-0.5, 0.5, (3, 3))
    weights_b = _np.random.uniform(-0.5, 0.5, (3, 3))
    builder.add_inner_product(
        name="ip1",
        W=weights_a,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="input",
        output_name="hidden",
    )
    builder.add_inner_product(
        name="ip2",
        W=weights_b,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="hidden",
        output_name="output",
    )
    builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams

    # Attach loss, optimizer and epoch settings.
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3)))
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8))
    builder.set_epochs(20, allowed_set=[10, 20, 30])

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)

    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)

    spec = mlmodel.get_spec()
    self.assertEqual(spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType")
    self.assertEqual(spec.description.trainingInput[1].name, "output_true")
    self.assertEqual(
        spec.description.trainingInput[1].type.WhichOneof("Type"),
        "multiArrayType")
def test_nn_fp16_make_updatable_fail(self):
    """make_updatable must raise on a model whose weights were fully
    quantized to FP16."""
    base_builder = self.create_base_builder(is_updatable=False)

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(base_builder.spec, model_path)

    fp32_model = MLModel(model_path)
    fp16_model = quantization_utils.quantize_weights(fp32_model, 16, "linear")
    fp16_builder = NeuralNetworkBuilder(spec=fp16_model._spec)

    # fails since an FP16 model cannot be marked updatable
    with self.assertRaises(ValueError):
        fp16_builder.make_updatable(["ip1", "ip2"])
def test_nn_partial_fp16_make_updatable_quantized_layer_fail(self):
    """make_updatable must raise when the requested layer itself was
    quantized to FP16, even if the rest of the model is FP32."""
    base_builder = self.create_base_builder(is_updatable=False)

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(base_builder.spec, model_path)

    fp32_model = MLModel(model_path)
    # Quantize only the 'ip2' layer.
    selector = LayerSelector(layer_name='ip2')
    partial_fp16_model = quantization_utils.quantize_weights(
        fp32_model, 16, "linear", selector=selector)
    partial_builder = NeuralNetworkBuilder(spec=partial_fp16_model._spec)

    # fails since model has a layer with FP16 bias
    with self.assertRaises(ValueError):
        partial_builder.make_updatable(["ip2"])
num_classes = 10

# Resize the final inner-product layer to emit `num_classes` outputs and
# seed the new rows with zeros (7 pre-existing classes assumed elsewhere;
# the remaining `num_classes - 3` slots get zeroed weights/biases here —
# NOTE(review): the "- 3" offset comes from the surrounding example).
layer.innerProduct.outputChannels = num_classes
fc_weights = np.zeros((num_classes - 3) * 1000)
fc_biases = np.zeros(num_classes - 3)
user_labels = [f"user{idx}" for idx in range(num_classes - 3)]
layer.innerProduct.weights.floatValue.extend(fc_weights)
layer.innerProduct.bias.floatValue.extend(fc_biases)
spec.neuralNetworkClassifier.stringClassLabels.vector.extend(user_labels)

# Make this model trainable.
builder = NeuralNetworkBuilder(spec=model._spec)
builder.make_updatable(["fullyconnected0"])
builder.set_categorical_cross_entropy_loss(name="lossLayer",
                                           input="labelProbability")
builder.set_epochs(10, [1, 10, 50])

# Using the SDG optimizer:
sgd_params = SgdParams(lr=0.001, batch=8, momentum=0)
sgd_params.set_batch(8, [1, 2, 8, 16])
builder.set_sgd_optimizer(sgd_params)

# Using the Adam optimizer:
# adam_params = AdamParams(lr=0.001, batch=8, beta1=0.9, beta2=0.999, eps=1e-8)
# adam_params.set_batch(8, [1, 2, 8, 16])
# builder.set_adam_optimizer(adam_params)

builder.spec.description.trainingInput[0].shortDescription = "Example image"
builder.spec.description.trainingInput[1].shortDescription = "True label"
# Build a single-layer linear-regression model (one inner product with
# bias), mark it updatable, and attach an MSE loss plus an SGD optimizer
# before saving the spec to disk.
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('result', datatypes.Array(*output_dim))]

weights = np.random.rand(1, input_max_size)
bias = np.random.rand(1)

builder = NeuralNetworkBuilder(input_features, output_features)
builder.add_inner_product(
    name='ip_layer',
    W=weights,
    b=bias,
    input_channels=input_max_size,
    output_channels=1,
    has_bias=True,
    input_name='data',
    output_name='result',
)
builder.make_updatable(['ip_layer'])
builder.set_mean_squared_error_loss(name='lossLayer',
                                    input_feature=output_features[0])

sgd = SgdParams(lr=0.01, batch=1)
sgd.set_batch(1, allowed_set=[1, 2, 4, 8, 16, 32])
builder.set_sgd_optimizer(sgd)
builder.set_epochs(16, allowed_set=[2, 4, 8, 16, 32, 64, 128, 256])

# builder.spec = convert_neural_network_spec_weights_to_fp16(builder.spec)
save_spec(builder.spec, '../core/LinearRegressionModel.mlmodel')