    def export_coreml(self, path, image_shape=(256, 256),
                      include_flexible_shape=True):
        """
        Save the model in Core ML format. The Core ML model takes an image of
        fixed size and a style index as inputs, and produces a stylized image
        of fixed size as output.

        Parameters
        ----------
        path : string
            The path of the file where we want to save the Core ML model.

        image_shape: tuple
            A tuple (defaults to (256, 256)) that binds the Core ML model to
            a fixed input shape.

        include_flexible_shape: bool
            A boolean value indicating whether flexible image shapes should be
            supported by the exported model.

        See Also
        --------
        save

        Examples
        --------
        >>> model.export_coreml('StyleTransfer.mlmodel')
        """
        import mxnet as _mx
        from .._mxnet_to_coreml import _mxnet_converter
        import coremltools

        transformer = self._model
        index = _mx.sym.Variable("index", shape=(1,), dtype=_np.int32)

        # append batch size and channels
        image_shape = (1, 3) + image_shape
        c_image = _mx.sym.Variable(self.content_feature, shape=image_shape,
                                   dtype=_np.float32)

        # signal that we want the transformer to prepare for coreml export
        # using a zero batch size
        transformer.batch_size = 0
        transformer.scale255 = True
        sym_out = transformer(c_image, index)

        mod = _mx.mod.Module(symbol=sym_out,
                             data_names=[self.content_feature, "index"],
                             label_names=None)
        mod.bind(data_shapes=list(zip([self.content_feature, "index"],
                                      [image_shape, (1,)])),
                 for_training=False, inputs_need_grad=False)
        # Copy the trained Gluon weights into the symbolic module, matching
        # Gluon parameter names to the module's parameter names by position.
        gluon_weights = transformer.collect_params()
        gluon_layers = list(gluon_weights._params)

        sym_layers = mod._param_names
        sym_weight_dict = {}
        for gluon_layer, sym_layer in zip(gluon_layers, sym_layers):
            sym_weight_dict[sym_layer] = gluon_weights[gluon_layer]._data[0]

        mod.set_params(sym_weight_dict, sym_weight_dict)
        index_dim = (1, self.num_styles)
        coreml_model = _mxnet_converter.convert(
            mod,
            input_shape=[(self.content_feature, image_shape),
                         ('index', index_dim)],
            mode=None, preprocessor_args=None, builder=None, verbose=False)

        transformer.scale255 = False
        spec = coreml_model.get_spec()
        image_input = spec.description.input[0]
        image_output = spec.description.output[0]

        input_array_shape = tuple(image_input.type.multiArrayType.shape)
        output_array_shape = tuple(image_output.type.multiArrayType.shape)

        self._export_coreml_image(image_input, input_array_shape)
        self._export_coreml_image(image_output, output_array_shape)

        stylized_image = 'stylized%s' % self.content_feature.capitalize()
        coremltools.utils.rename_feature(spec,
                'transformer__mulscalar0_output', stylized_image, True, True)

        if include_flexible_shape:
            # Support flexible input/output sizes from 64 pixels up
            # (-1 means no upper bound on the range).
            flexible_shape_utils = \
                _mxnet_converter._coremltools.models.neural_network.flexible_shape_utils
            img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
            img_size_ranges.add_height_range((64, -1))
            img_size_ranges.add_width_range((64, -1))
            flexible_shape_utils.update_image_size_range(
                spec, feature_name=self.content_feature, size_range=img_size_ranges)
            flexible_shape_utils.update_image_size_range(
                spec, feature_name=stylized_image, size_range=img_size_ranges)

        model_type = 'style transfer (%s)' % self.model
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(
            model_type)
        spec.description.input[0].shortDescription = 'Input image'
        spec.description.input[1].shortDescription = u'Style index array (set index I to 1.0 to enable Ith style)'
        spec.description.output[0].shortDescription = 'Stylized image'
        user_defined_metadata = _coreml_utils._get_model_metadata(
            self.__class__.__name__, {
                'model': self.model,
                'num_styles': str(self.num_styles),
                'content_feature': self.content_feature,
                'style_feature': self.style_feature,
                'max_iterations': str(self.max_iterations),
                'training_iterations': str(self.training_iterations),
            }, version=StyleTransfer._PYTHON_STYLE_TRANSFER_VERSION)
        spec.description.metadata.userDefined.update(user_defined_metadata)
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, path)
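
A minimal usage sketch for the exported model (hypothetical: it assumes a
trained style transfer `model` whose content feature is named 'image', a
`photo.jpg` on disk, and the `coremltools` and Pillow packages; `predict`
requires macOS):

import numpy as np
import coremltools
from PIL import Image

model.export_coreml('StyleTransfer.mlmodel')
mlmodel = coremltools.models.MLModel('StyleTransfer.mlmodel')

# The style selector is a one-hot array: set index I to 1.0 to enable style I
# (reshape to match the model's declared 'index' input if needed).
index = np.zeros(model.num_styles)
index[0] = 1.0

# With include_flexible_shape=True, any size from 64 pixels up is accepted;
# otherwise the image must match image_shape (here 256x256).
img = Image.open('photo.jpg').resize((256, 256))
out = mlmodel.predict({'image': img, 'index': index})
stylized = out['stylizedImage']  # 'stylized' + capitalized content feature
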
    def export_coreml(self, filename, verbose=False):
        """
        Save the model in Core ML format. The Core ML model takes a grayscale
        drawing of fixed size as input and produces two outputs:
        `classLabel` and `labelProbabilities`.

        The first one, `classLabel`, is an integer or string (depending on the
        classes the model was trained on) containing the label of the top
        prediction by the model.

        The second one, `labelProbabilities`, is a dictionary with all the
        class labels in the dataset as the keys, and their respective
        probabilities as the values.

        See Also
        --------
        save

        Parameters
        ----------
        filename : string
            The path of the file where we want to save the Core ML model.

        verbose : bool, optional
            If True, prints export progress.


        Examples
        --------
        >>> model.export_coreml('drawing_classifier.mlmodel')
        """
        import mxnet as _mx
        from .._mxnet._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools

        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
            shape=image_shape, dtype=_np.float32)

        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)

        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()

        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()

        new_arg_params = {
            k: net_params[k].data(net_params[k].list_ctx()[0])
            for k in arg_params
        }
        new_aux_params = {
            k: net_params[k].data(net_params[k].list_ctx()[0])
            for k in aux_params
        }
        mod.set_params(new_arg_params, new_aux_params)

        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                class_labels=self.classes,
                                input_shape=[(self.feature, image_shape)],
                                builder=None, verbose=verbose,
                                preprocessor_args={
                                    'image_input_names': [self.feature],
                                    'image_scale': 1.0/255
                                })

        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1 - class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)
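
A minimal usage sketch for the exported classifier (hypothetical: it assumes a
trained drawing classifier `model`, a doodle saved as 'doodle.png', and that
BITMAP_WIDTH and BITMAP_HEIGHT are both 28; `predict` requires macOS):

import coremltools
from PIL import Image

model.export_coreml('drawing_classifier.mlmodel')
mlmodel = coremltools.models.MLModel('drawing_classifier.mlmodel')

# The model expects a grayscale bitmap of the fixed input size.
drawing = Image.open('doodle.png').convert('L').resize((28, 28))
out = mlmodel.predict({model.feature: drawing})
print(out['classLabel'])                    # label of the top prediction
print(out[model.target + 'Probabilities'])  # per-class probabilities
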
    def export_coreml(self,
                      filename,
                      include_non_maximum_suppression=True,
                      iou_threshold=None,
                      confidence_threshold=None):
        """
        Save the model in Core ML format. The Core ML model takes an image of
        fixed size as input and produces two output arrays: `confidence` and
        `coordinates`.

        The first one, `confidence`, is an `N`-by-`C` array, where `N` is the
        number of instances predicted and `C` is the number of classes. The
        number `N` is fixed and will include many low-confidence predictions.
        The instances are not sorted by confidence, so the first one will
        generally not have the highest confidence (unlike in `predict`). Also
        unlike the `predict` function, the instances have not undergone
        what is called `non-maximum suppression`, which means there could be
        several instances close in location and size that have all discovered
        the same object instance. Confidences do not need to sum to 1 over the
        classes; any remaining probability is implicitly the confidence that
        no object instance is present at the given coordinates. The classes
        appear in the array in alphabetical order.

        The second array, `coordinates`, is of size `N`-by-4, where the first
        dimension `N` again represents instances and corresponds to the
        `confidence` array. The second dimension represents `x`, `y`, `width`,
        `height`, in that order. The values are relative coordinates, so
        (0.5, 0.5) represents the center of the image and (1, 1) the bottom
        right corner. To get pixel-value coordinates similar to `predict`,
        multiply the relative values by the size of the original image
        (before it was resized to the fixed input size).

        See Also
        --------
        save

        Parameters
        ----------
        filename : string
            The path of the file where we want to save the Core ML model.

        include_non_maximum_suppression : bool
            A boolean parameter indicating whether the Core ML model should be
            saved with built-in non-maximum suppression or not. Note that
            built-in non-maximum suppression is only available in iOS 12+.
            This parameter is set to True by default.

        iou_threshold : float
            Threshold value for non-maximum suppression. Non-maximum suppression
            prevents multiple bounding boxes appearing over a single object.
            This threshold, set between 0 and 1, controls how aggressive this
            suppression is. A value of 1 means no suppression will occur, while
            a value of 0 will maximally suppress neighboring boxes around a
            prediction.

        confidence_threshold : float
            Only return predictions above this level of confidence. The
            threshold can range from 0 to 1.

        Examples
        --------
        >>> model.export_coreml('one_shot.mlmodel')
        """
        import coremltools
        additional_user_defined_metadata = _coreml_utils._get_tc_version_info()
        short_description = _coreml_utils._mlmodel_short_description(
            'Object Detector')
        if USE_CPP:
            options = {
                'include_non_maximum_suppression':
                include_non_maximum_suppression,
            }

            options['version'] = self._PYTHON_ONE_SHOT_OBJECT_DETECTOR_VERSION

            if confidence_threshold is not None:
                options['confidence_threshold'] = confidence_threshold

            if iou_threshold is not None:
                options['iou_threshold'] = iou_threshold

            short_description = _coreml_utils._mlmodel_short_description(
                'One Shot Object Detector')
            self.__proxy__['detector'].__proxy__.export_to_coreml(
                filename, short_description, additional_user_defined_metadata,
                options)
        else:
            from coremltools.models.utils import save_spec as _save_spec
            model = self.__proxy__['detector']._create_coreml_model(
                include_non_maximum_suppression=include_non_maximum_suppression,
                iou_threshold=iou_threshold,
                confidence_threshold=confidence_threshold)
            model.description.metadata.shortDescription = short_description
            _save_spec(model, filename)
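
A minimal sketch of converting the detector's relative `coordinates` output
back to pixel coordinates, as described in the docstring above (hypothetical
helper: `coords` is the N-by-4 array in relative (x, y, width, height) form,
and (orig_w, orig_h) is the size of the original image before resizing):

import numpy as np

def to_pixel_boxes(coords, orig_w, orig_h):
    # Scale relative (x, y, width, height) back to pixel units.
    boxes = np.array(coords, dtype=np.float64)
    boxes[:, [0, 2]] *= orig_w   # x and width
    boxes[:, [1, 3]] *= orig_h   # y and height
    return boxes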