def export_coreml(self, filename):
    """
    Export the model in Core ML format.

    Parameters
    ----------
    filename: str
      A valid filename where the model can be saved.

    Examples
    --------
    >>> model.export_coreml("MyModel.mlmodel")
    """
    from turicreate.extensions import _logistic_classifier_export_as_model_asset
    from turicreate.toolkits import _coreml_utils

    # Export context: class name, human-readable description, and the Turi
    # Create version info recorded as user-defined metadata.
    context = {
        "class": self.__class__.__name__,
        "short_description": _coreml_utils._mlmodel_short_description("text classifier"),
        "user_defined": _coreml_utils._get_tc_version_info(),
    }
    # The underlying logistic classifier proxy performs the actual export.
    _logistic_classifier_export_as_model_asset(
        self.__proxy__["classifier"].__proxy__, filename, context
    )
def export_coreml(self, filename):
    """
    Export the model in Core ML format.

    Parameters
    ----------
    filename: str
      A valid filename where the model can be saved.

    Examples
    --------
    >>> model.export_coreml("MyModel.mlmodel")
    """
    from turicreate.extensions import _linear_regression_export_as_model_asset
    from turicreate.toolkits import _coreml_utils

    # Export context: class name, human-readable description, and the Turi
    # Create version info recorded as user-defined metadata.
    context = {
        "class": self.__class__.__name__,
        "short_description": _coreml_utils._mlmodel_short_description("linear regression"),
        "user_defined": _coreml_utils._get_tc_version_info(),
    }
    _linear_regression_export_as_model_asset(self.__proxy__, filename, context)
def _export_coreml_impl(self, filename, context):
    """
    Export this model to *filename* as a Core ML model asset.

    Merges the Turi Create version info into ``context['user_defined']``
    (creating the entry when absent) before delegating to the native
    exporter.
    """
    version_info = _coreml_utils._get_tc_version_info()
    # EAFP: merge into an existing user_defined dict, or install one.
    try:
        context["user_defined"].update(version_info)
    except KeyError:
        context["user_defined"] = version_info
    tc.extensions._xgboost_export_as_model_asset(self.__proxy__, filename, context)
def _set_inputs_outputs_and_metadata(spec, nn_spec):
    """
    Rewrite the exported spec's class labels, feature names, and metadata,
    then return the finished MLModel.

    NOTE(review): this is a closure — it reads `self`, `coremltools`,
    `_coreml_utils` and `ImageClassifier` from the enclosing scope.
    """
    # Replace the classifier with the new classes
    class_labels = self.classifier.classes

    probOutput = spec.description.output[0]
    classLabel = spec.description.output[1]
    probOutput.type.dictionaryType.MergeFromString(b'')
    if type(class_labels[0]) == int:
        # Integer class labels
        nn_spec.ClearField('int64ClassLabels')
        probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
        classLabel.type.int64Type.MergeFromString(b'')
        del nn_spec.int64ClassLabels.vector[:]
        for c in class_labels:
            nn_spec.int64ClassLabels.vector.append(c)
    else:
        # String class labels
        nn_spec.ClearField('stringClassLabels')
        probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
        classLabel.type.stringType.MergeFromString(b'')
        del nn_spec.stringClassLabels.vector[:]
        for c in class_labels:
            nn_spec.stringClassLabels.vector.append(c)

    # Rename outputs to user-facing names derived from the target column.
    prob_name = self.target + 'Probability'
    label_name = self.target
    old_output_name = nn_spec.layers[-1].name
    coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)
    coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)
    # rename_feature does not touch the layer/probability-layer names, so
    # patch them by hand when they still carry the old name.
    if nn_spec.layers[-1].name == old_output_name:
        nn_spec.layers[-1].name = prob_name
    if nn_spec.labelProbabilityLayerName == old_output_name:
        nn_spec.labelProbabilityLayerName = prob_name
    coremltools.models.utils.rename_feature(spec, 'data', self.feature)
    if len(nn_spec.preprocessing) > 0:
        nn_spec.preprocessing[0].featureName = self.feature

    mlmodel = coremltools.models.MLModel(spec)
    model_type = 'image classifier (%s)' % self.model
    mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
    mlmodel.input_description[self.feature] = u'Input image'
    mlmodel.output_description[prob_name] = 'Prediction probabilities'
    mlmodel.output_description[label_name] = 'Class label of top prediction'

    model_metadata = {
        'model': self.model,
        'target': self.target,
        'features': self.feature,
        'max_iterations': str(self.max_iterations),
    }
    # BUG FIX: dict.update() returns None, so the previous
    # `user_defined_metadata = model_metadata.update(...)` passed None to
    # _set_model_metadata, dropping all metadata. Mutate in place instead.
    model_metadata.update(_coreml_utils._get_tc_version_info())
    _coreml_utils._set_model_metadata(
        mlmodel,
        self.__class__.__name__,
        model_metadata,
        version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)
    return mlmodel
def export_coreml(self, filename):
    """
    Export the model in Core ML format.

    Parameters
    ----------
    filename: str
      A valid filename where the model can be saved.

    Examples
    --------
    >>> model.export_coreml("MyModel.mlmodel")
    """
    # The native proxy performs the actual export; Python only supplies the
    # description text and the version metadata.
    self.__proxy__.export_to_coreml(
        filename,
        _coreml_utils._mlmodel_short_description("Drawing Classifier"),
        _coreml_utils._get_tc_version_info(),
    )
def export_coreml(self, filename, image_shape=(256, 256),
                  include_flexible_shape=True):
    """
    Save the model in Core ML format. The Core ML model takes an image of
    fixed size, and a style index as inputs, and produces an output image of
    fixed size.

    Parameters
    ----------
    filename : string
        A string with the path for saving the Core ML model.
        (Docstring fix: the parameter is named ``filename``, not ``path``.)

    image_shape: tuple
        A (height, width) tuple (defaults to (256, 256)) that binds the
        Core ML model to a fixed input shape.

    include_flexible_shape: bool
        Allows the size of the input image to be flexible. Any input image
        where the height and width are at least 64 will be accepted by the
        Core ML model.

    See Also
    --------
    save

    Examples
    --------
    >>> model.export_coreml('StyleTransfer.mlmodel')
    """
    # image_shape is (height, width); the proxy takes them separately.
    options = {
        'image_width': image_shape[1],
        'image_height': image_shape[0],
        'include_flexible_shape': include_flexible_shape,
    }

    additional_user_defined_metadata = _coreml_utils._get_tc_version_info()
    short_description = _coreml_utils._mlmodel_short_description('Style Transfer')
    self.__proxy__.export_to_coreml(filename, short_description,
                                    additional_user_defined_metadata, options)
def export_coreml(self, filename):
    """
    Save the model in Core ML format.

    The exported model calculates the distance between a query image and
    each row of the model's stored data. It does not sort and retrieve the
    k nearest neighbors of the query image.

    See Also
    --------
    save

    Examples
    --------
    >>> # Train an image similarity model
    >>> model = turicreate.image_similarity.create(data)
    >>>
    >>> # Query the model for similar images
    >>> similar_images = model.query(data)
    >>>
    >>> # Export the model to Core ML format
    >>> model.export_coreml('myModel.mlmodel')
    >>>
    >>> # Load the Core ML model
    >>> import coremltools
    >>> ml_model = coremltools.models.MLModel('myModel.mlmodel')
    >>>
    >>> # Prepare the first image of reference data for consumption
    >>> # by the Core ML model
    >>> import PIL
    >>> image = tc.image_analysis.resize(data['image'][0], *reversed(model.input_image_shape))
    >>> image = PIL.Image.fromarray(image.pixel_data)
    >>>
    >>> # Calculate distances using the Core ML model
    >>> ml_model.predict(data={'image': image})
    {'distance': array([ 0., 28.453125, 24.96875 ])}
    """
    import numpy as _np
    from copy import deepcopy
    from turicreate._deps.minimal_package import _minimal_package_import_check

    _cmt = _minimal_package_import_check("coremltools")
    from coremltools.models import (
        datatypes as _datatypes,
        neural_network as _neural_network,
    )
    from turicreate.toolkits import _coreml_utils

    # Get the reference data from the model
    proxy = self.similarity_model.__proxy__
    reference_data = _np.array(
        _tc.extensions._nearest_neighbors._nn_get_reference_data(proxy))
    num_examples, embedding_size = reference_data.shape

    output_name = "distance"
    output_features = [(output_name, _datatypes.Array(num_examples))]

    if self.model != "VisionFeaturePrint_Scene":
        # Get the Core ML spec for the feature extractor
        ptModel = _pre_trained_models.IMAGE_MODELS[self.model]()
        feature_extractor = _image_feature_extractor.TensorFlowFeatureExtractor(
            ptModel)
        feature_extractor_spec = feature_extractor.get_coreml_model().get_spec()

        input_name = feature_extractor.coreml_data_layer
        input_features = [(input_name,
                           _datatypes.Array(*(self.input_image_shape)))]

        # Convert the neuralNetworkClassifier to a neuralNetwork
        layers = deepcopy(feature_extractor_spec.neuralNetworkClassifier.layers)
        for l in layers:
            feature_extractor_spec.neuralNetwork.layers.append(l)

        builder = _neural_network.NeuralNetworkBuilder(
            input_features, output_features, spec=feature_extractor_spec)
        feature_layer = feature_extractor.coreml_feature_layer

    else:  # self.model == VisionFeaturePrint_Scene
        # Create a pipeline that contains a VisionFeaturePrint followed by a
        # neural network.
        BGR_VALUE = _cmt.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value(
            "BGR")
        DOUBLE_ARRAY_VALUE = _cmt.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value(
            "DOUBLE")
        INPUT_IMAGE_SHAPE = 299

        top_spec = _cmt.proto.Model_pb2.Model()
        top_spec.specificationVersion = 3
        desc = top_spec.description

        input = desc.input.add()
        input.name = self.feature
        input.type.imageType.width = INPUT_IMAGE_SHAPE
        input.type.imageType.height = INPUT_IMAGE_SHAPE
        input.type.imageType.colorSpace = BGR_VALUE

        output = desc.output.add()
        output.name = output_name
        output.type.multiArrayType.shape.append(num_examples)
        output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE

        # VisionFeaturePrint extractor
        pipeline = top_spec.pipeline
        scene_print = pipeline.models.add()
        scene_print.specificationVersion = 3
        scene_print.visionFeaturePrint.scene.version = 1

        input = scene_print.description.input.add()
        input.name = self.feature
        input.type.imageType.width = 299
        input.type.imageType.height = 299
        input.type.imageType.colorSpace = BGR_VALUE

        feature_layer = "VisionFeaturePrint_Scene_output"
        output = scene_print.description.output.add()
        output.name = feature_layer
        output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
        output.type.multiArrayType.shape.append(2048)

        # Neural network builder
        input_features = [(feature_layer, _datatypes.Array(2048))]
        builder = _neural_network.NeuralNetworkBuilder(input_features,
                                                       output_features)

    # To add the nearest neighbors model we add calculation of the euclidean
    # distance between the newly extracted query features (denoted by the vector u)
    # and each extracted reference feature (denoted by the rows of matrix V).
    # Calculation of sqrt((v_i-u)^2) = sqrt(v_i^2 - 2v_i*u + u^2) ensues.
    V = reference_data
    v_squared = (V * V).sum(axis=1)

    builder.add_inner_product(
        "v^2-2vu",
        W=-2 * V,
        b=v_squared,
        has_bias=True,
        input_channels=embedding_size,
        output_channels=num_examples,
        input_name=feature_layer,
        output_name="v^2-2vu",
    )
    builder.add_unary(
        "element_wise-u^2",
        mode="power",
        alpha=2,
        input_name=feature_layer,
        output_name="element_wise-u^2",
    )
    # Produce a vector of length num_examples with all values equal to u^2
    builder.add_inner_product(
        "u^2",
        W=_np.ones((embedding_size, num_examples)),
        b=None,
        has_bias=False,
        input_channels=embedding_size,
        output_channels=num_examples,
        input_name="element_wise-u^2",
        output_name="u^2",
    )
    builder.add_elementwise(
        "v^2-2vu+u^2",
        mode="ADD",
        input_names=["v^2-2vu", "u^2"],
        output_name="v^2-2vu+u^2",
    )
    # v^2-2vu+u^2=(v-u)^2 is non-negative but some computations on GPU may result in
    # small negative values. Apply RELU so we don't take the square root of negative values.
    builder.add_activation("relu", non_linearity="RELU",
                           input_name="v^2-2vu+u^2", output_name="relu")
    builder.add_unary("sqrt", mode="sqrt", input_name="relu",
                      output_name=output_name)

    # Finalize model
    if self.model != "VisionFeaturePrint_Scene":
        builder.set_input([input_name], [self.input_image_shape])
        builder.set_output([output_name], [(num_examples, )])
        _cmt.models.utils.rename_feature(builder.spec, input_name, self.feature)
        builder.set_pre_processing_parameters(image_input_names=self.feature)
        mlmodel = _cmt.models.MLModel(builder.spec)
    else:
        top_spec.pipeline.models.extend([builder.spec])
        mlmodel = _cmt.models.MLModel(top_spec)

    # Add metadata
    model_type = "image similarity"
    mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
    mlmodel.input_description[self.feature] = u"Input image"
    mlmodel.output_description[
        output_name] = u"Distances between the input and reference images"

    model_metadata = {
        "model": self.model,
        "num_examples": str(self.num_examples),
    }
    # BUG FIX: dict.update() returns None, so the previous
    # `user_defined_metadata = model_metadata.update(...)` passed None as the
    # user-defined metadata. Mutate in place and pass the dict itself.
    model_metadata.update(_coreml_utils._get_tc_version_info())
    _coreml_utils._set_model_metadata(
        mlmodel,
        self.__class__.__name__,
        model_metadata,
        version=ImageSimilarityModel._PYTHON_IMAGE_SIMILARITY_VERSION,
    )
    mlmodel.save(filename)
def export_coreml(
    self,
    filename,
    include_non_maximum_suppression=True,
    iou_threshold=None,
    confidence_threshold=None,
):
    """
    Save the model in Core ML format. The Core ML model takes an image of
    fixed size as input and produces two output arrays: `confidence` and
    `coordinates`.

    The first one, `confidence` is an `N`-by-`C` array, where `N` is the
    number of instances predicted and `C` is the number of classes. The
    number `N` is fixed and will include many low-confidence predictions.
    The instances are not sorted by confidence, so the first one will
    generally not have the highest confidence (unlike in `predict`). Also
    unlike the `predict` function, the instances have not undergone what is
    called `non-maximum suppression`, which means there could be several
    instances close in location and size that have all discovered the same
    object instance. Confidences do not need to sum to 1 over the classes;
    any remaining probability is implied as confidence there is no object
    instance present at all at the given coordinates. The classes appear in
    the array alphabetically sorted.

    The second array `coordinates` is of size `N`-by-4, where the first
    dimension `N` again represents instances and corresponds to the
    `confidence` array. The second dimension represents `x`, `y`, `width`,
    `height`, in that order. The values are represented in relative
    coordinates, so (0.5, 0.5) represents the center of the image and
    (1, 1) the bottom right corner. You will need to multiply the relative
    values with the original image size before you resized it to the fixed
    input size to get pixel-value coordinates similar to `predict`.

    See Also
    --------
    save

    Parameters
    ----------
    filename : string
        The path of the file where we want to save the Core ML model.

    include_non_maximum_suppression : bool
        Non-maximum suppression is only available in iOS 12+.
        A boolean parameter to indicate whether the Core ML model should be
        saved with built-in non-maximum suppression or not.
        This parameter is set to True by default.

    iou_threshold : float
        Threshold value for non-maximum suppression. Non-maximum suppression
        prevents multiple bounding boxes appearing over a single object.
        This threshold, set between 0 and 1, controls how aggressive this
        suppression is. A value of 1 means no maximum suppression will
        occur, while a value of 0 will maximally suppress neighboring boxes
        around a prediction.

    confidence_threshold : float
        Only return predictions above this level of confidence. The
        threshold can range from 0 to 1.

    Examples
    --------
    >>> model.export_coreml('one_shot.mlmodel')
    """
    from turicreate.toolkits import _coreml_utils

    # Export options for the underlying detector proxy. Thresholds are only
    # forwarded when explicitly supplied.
    options = {
        "include_non_maximum_suppression": include_non_maximum_suppression,
        "version": self._PYTHON_ONE_SHOT_OBJECT_DETECTOR_VERSION,
    }
    if confidence_threshold is not None:
        options["confidence_threshold"] = confidence_threshold
    if iou_threshold is not None:
        options["iou_threshold"] = iou_threshold

    # FIX: the original computed metadata and a short description twice
    # (first with "Object Detector", immediately overwritten); the dead
    # first pair has been removed.
    additional_user_defined_metadata = _coreml_utils._get_tc_version_info()
    short_description = _coreml_utils._mlmodel_short_description(
        "One Shot Object Detector")
    self.__proxy__["detector"].__proxy__.export_to_coreml(
        filename, short_description, additional_user_defined_metadata, options)
def export_coreml(self, filename):
    """
    Save the model in Core ML format.

    See Also
    --------
    save

    Examples
    --------
    >>> model.export_coreml('./myModel.mlmodel')
    """
    import coremltools
    from coremltools.proto.FeatureTypes_pb2 import ArrayFeatureType

    prob_name = self.target + "Probability"

    def get_custom_model_spec():
        # Build the spec for the small custom classifier head that runs on
        # top of the feature extractor.
        from coremltools.models.neural_network import NeuralNetworkBuilder
        from coremltools.models.datatypes import Array

        input_name = "output1"
        input_length = self._feature_extractor.output_length
        builder = NeuralNetworkBuilder(
            [(input_name, Array(input_length,))],
            [(prob_name, Array(self.num_classes,))],
            "classifier",
        )
        layer_counter = [0]
        builder.set_input([input_name], [(input_length,)])

        def next_layer_name():
            layer_counter[0] += 1
            return "layer_%d" % layer_counter[0]

        for i, cur_layer in enumerate(self._custom_classifier.export_weights()):
            W = cur_layer["weight"]
            nC, nB = W.shape
            Wb = cur_layer["bias"]

            output_name = next_layer_name()
            builder.add_inner_product(
                name="inner_product_" + str(i),
                W=W,
                b=Wb,
                input_channels=nB,
                output_channels=nC,
                has_bias=True,
                input_name=input_name,
                output_name=output_name,
            )
            input_name = output_name

            if cur_layer["act"]:
                output_name = next_layer_name()
                builder.add_activation(
                    "activation" + str(i), "RELU", input_name, output_name
                )
                input_name = output_name

        builder.add_softmax("softmax", input_name, prob_name)
        builder.set_class_labels(
            self.classes,
            predicted_feature_name=self.target,
            prediction_blob=prob_name,
        )
        return builder.spec

    top_level_spec = coremltools.proto.Model_pb2.Model()
    top_level_spec.specificationVersion = 3

    # Set input
    desc = top_level_spec.description
    input = desc.input.add()
    input.name = self.feature
    assert type(self.feature) is str
    input.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value(
        "FLOAT32"
    )
    input.type.multiArrayType.shape.append(15600)

    # Set outputs
    prob_output = desc.output.add()
    prob_output.name = prob_name
    label_output = desc.output.add()
    label_output.name = self.target
    desc.predictedFeatureName = self.target
    desc.predictedProbabilitiesName = prob_name
    if type(self.classes[0]) == int:
        # Class labels are ints
        prob_output.type.dictionaryType.int64KeyType.MergeFromString(b"")
        label_output.type.int64Type.MergeFromString(b"")
    else:  # Class are strings
        prob_output.type.dictionaryType.stringKeyType.MergeFromString(b"")
        label_output.type.stringType.MergeFromString(b"")

    # Set metadata
    user_metadata = desc.metadata.userDefined
    user_metadata["sampleRate"] = str(self._feature_extractor.input_sample_rate)

    pipeline = top_level_spec.pipelineClassifier.pipeline

    # Add the preprocessing model
    preprocessing_model = pipeline.models.add()
    preprocessing_model.customModel.className = "TCSoundClassifierPreprocessing"
    preprocessing_model.specificationVersion = 3
    preprocessing_input = preprocessing_model.description.input.add()
    preprocessing_input.CopyFrom(input)

    preprocessed_output = preprocessing_model.description.output.add()
    preprocessed_output.name = "preprocessed_data"
    preprocessed_output.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value(
        "DOUBLE"
    )
    preprocessed_output.type.multiArrayType.shape.append(1)
    preprocessed_output.type.multiArrayType.shape.append(96)
    preprocessed_output.type.multiArrayType.shape.append(64)

    # Add the feature extractor, updating its input name
    feature_extractor_spec = self._feature_extractor.get_spec()
    pipeline.models.add().CopyFrom(feature_extractor_spec)
    pipeline.models[-1].description.input[0].name = preprocessed_output.name
    pipeline.models[-1].neuralNetwork.layers[0].input[0] = preprocessed_output.name

    # Add the custom neural network
    pipeline.models.add().CopyFrom(get_custom_model_spec())

    # Set key type for the probability dictionary
    prob_output_type = pipeline.models[-1].description.output[0].type.dictionaryType
    if type(self.classes[0]) == int:
        prob_output_type.int64KeyType.MergeFromString(b"")
    else:  # String labels
        prob_output_type.stringKeyType.MergeFromString(b"")

    mlmodel = coremltools.models.MLModel(top_level_spec)
    model_type = "sound classifier"
    mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
    mlmodel.input_description[self.feature] = u"Input audio features"
    mlmodel.output_description[prob_name] = "Prediction probabilities"
    mlmodel.output_description[self.target] = "Class label of top prediction"

    model_metadata = {
        "target": self.target,
        "feature": self.feature,
    }
    # BUG FIX: dict.update() returns None, so the previous
    # `user_defined_metadata = model_metadata.update(...)` passed None as the
    # user-defined metadata. Mutate in place and pass the dict itself.
    model_metadata.update(_coreml_utils._get_tc_version_info())
    _coreml_utils._set_model_metadata(
        mlmodel,
        self.__class__.__name__,
        model_metadata,
        version=SoundClassifier._PYTHON_SOUND_CLASSIFIER_VERSION,
    )
    mlmodel.save(filename)
def export_coreml(self, filename):
    """
    Save the model in Core ML format.

    See Also
    --------
    save

    Examples
    --------
    >>> model.export_coreml('./myModel.mlmodel')
    """
    import coremltools
    from coremltools.proto.FeatureTypes_pb2 import ArrayFeatureType

    prob_name = self.target + 'Probability'

    def get_custom_model_spec():
        # Build the spec for the custom classifier head.
        from coremltools.models.neural_network import NeuralNetworkBuilder
        from coremltools.models.datatypes import Array, Dictionary, String

        input_name = 'output1'
        input_length = self._feature_extractor.output_length
        builder = NeuralNetworkBuilder(
            [(input_name, Array(input_length, ))],
            [(prob_name, Dictionary(String))], 'classifier')

        input_name, output_name = input_name, 0
        for i, cur_layer in enumerate(self._custom_classifier.export_weights()):
            W = cur_layer['weight']
            nC, nB = W.shape
            Wb = cur_layer['bias']

            builder.add_inner_product(name="inner_product_" + str(i),
                                      W=W,
                                      b=Wb,
                                      input_channels=nB,
                                      output_channels=nC,
                                      has_bias=True,
                                      input_name=str(input_name),
                                      output_name='inner_product_' + str(output_name))
            if cur_layer['act']:
                builder.add_activation("activation" + str(i), 'RELU',
                                       'inner_product_' + str(output_name),
                                       str(output_name))
            input_name = i
            output_name = i + 1

        last_output = builder.spec.neuralNetworkClassifier.layers[-1].output[0]
        builder.add_softmax('softmax', last_output, self.target)

        builder.set_class_labels(self.classes,
                                 predicted_feature_name=self.target)
        # NOTE(review): input_name is an int index here, inherited from the
        # loop above — confirm set_input tolerates non-string names.
        builder.set_input([input_name], [(input_length, )])
        builder.set_output([self.target], [(self.num_classes, )])
        return builder.spec

    top_level_spec = coremltools.proto.Model_pb2.Model()
    top_level_spec.specificationVersion = 3

    # Set input
    desc = top_level_spec.description
    input = desc.input.add()
    input.name = self.feature
    input.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value(
        'FLOAT32')
    input.type.multiArrayType.shape.append(15600)

    # Set outputs
    prob_output = desc.output.add()
    prob_output.name = prob_name
    label_output = desc.output.add()
    label_output.name = self.target
    desc.predictedFeatureName = self.target
    desc.predictedProbabilitiesName = prob_name
    if type(self.classes[0]) == int:
        # Class labels are ints
        prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')
        label_output.type.int64Type.MergeFromString(b'')
    else:  # Class are strings
        prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')
        label_output.type.stringType.MergeFromString(b'')

    # Set metadata
    user_metadata = desc.metadata.userDefined
    user_metadata['sampleRate'] = str(self._feature_extractor.input_sample_rate)

    pipeline = top_level_spec.pipelineClassifier.pipeline

    # Add the preprocessing model
    preprocessing_model = pipeline.models.add()
    preprocessing_model.customModel.className = 'TCSoundClassifierPreprocessing'
    preprocessing_model.specificationVersion = 3
    preprocessing_input = preprocessing_model.description.input.add()
    preprocessing_input.CopyFrom(input)

    preprocessed_output = preprocessing_model.description.output.add()
    preprocessed_output.name = 'preprocessed_data'
    preprocessed_output.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value(
        'DOUBLE')
    preprocessed_output.type.multiArrayType.shape.append(1)
    preprocessed_output.type.multiArrayType.shape.append(96)
    preprocessed_output.type.multiArrayType.shape.append(64)

    # Add the feature extractor, updating its input name
    feature_extractor_spec = self._feature_extractor.get_spec()
    pipeline.models.add().CopyFrom(feature_extractor_spec)
    pipeline.models[-1].description.input[0].name = preprocessed_output.name
    pipeline.models[-1].neuralNetwork.layers[0].input[0] = preprocessed_output.name

    # Add the custom neural network
    pipeline.models.add().CopyFrom(get_custom_model_spec())

    # Set key type for the probability dictionary
    prob_output_type = pipeline.models[-1].description.output[0].type.dictionaryType
    if type(self.classes[0]) == int:
        prob_output_type.int64KeyType.MergeFromString(b'')
    else:  # String labels
        prob_output_type.stringKeyType.MergeFromString(b'')

    mlmodel = coremltools.models.MLModel(top_level_spec)
    model_type = 'sound classifier'
    mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
    mlmodel.input_description[self.feature] = u'Input audio features'
    mlmodel.output_description[prob_name] = 'Prediction probabilities'
    mlmodel.output_description[self.target] = 'Class label of top prediction'

    model_metadata = {
        'target': self.target,
        'feature': self.feature,
    }
    # BUG FIX: dict.update() returns None, so the previous
    # `user_defined_metadata = model_metadata.update(...)` passed None as the
    # user-defined metadata. Mutate in place and pass the dict itself.
    model_metadata.update(_coreml_utils._get_tc_version_info())
    _coreml_utils._set_model_metadata(
        mlmodel,
        self.__class__.__name__,
        model_metadata,
        version=SoundClassifier._PYTHON_SOUND_CLASSIFIER_VERSION)
    mlmodel.save(filename)