def __init__(self, model):
    """Parse a Core ML model into a CoremlGraph.

    model - path to a .mlmodel file, or an already-loaded _MLModel
            instance (the latter is what main() passes in).

    Raises:
        TypeError  - if `model` is neither a path nor an _MLModel.
        ValueError - if the spec is not a neural-network model type.
    """
    super(CoremlParser, self).__init__()

    # Load model file into Coreml Graph.
    # Accept a file path as before, but also accept an _MLModel directly:
    # main() constructs _MLModel(...) and hands it to this constructor,
    # which the original string-only check rejected with `assert False`.
    if isinstance(model, _string_types):
        model = _MLModel(model)
    if isinstance(model, _MLModel):
        model = model.get_spec()
        self.weight_loaded = True
    else:
        # Was `assert False`: asserts are stripped under `python -O`,
        # so raise an explicit, descriptive error instead.
        raise TypeError(
            "CoremlParser expects a model path or an _MLModel, got %r"
            % type(model))

    # Build Network Graph: infer per-layer shapes for whichever
    # neural-network flavor the spec contains.
    # NOTE(review): shape_dict is stored on the class (not the instance),
    # so multiple parsers share it — kept as-is for compatibility.
    model_type = model.WhichOneof('Type')
    if model_type == 'neuralNetworkClassifier':
        CoremlParser.shape_dict = infer_shapes(
            model.neuralNetworkClassifier, model.description.input)
    elif model_type == 'neuralNetwork':
        CoremlParser.shape_dict = infer_shapes(
            model.neuralNetwork, model.description.input)
    elif model_type == 'neuralNetworkRegressor':
        CoremlParser.shape_dict = infer_shapes(
            model.neuralNetworkRegressor, model.description.input)
    else:
        # Was `assert False`: raise explicitly for unsupported specs.
        raise ValueError(
            "Unsupported Core ML model type: %s" % model_type)

    # self.data_format ? TODO
    self.data_format = 'channels_first'
    self.coreml_graph = CoremlGraph(model)
    self.coreml_graph.build()
    self.lambda_layer_count = 0
def convert_keras(
        cls, model, input_names=None, output_names=None,
        image_input_names=None, image_output_names=None,
        deprocessing_args=None, is_bgr=False, is_grayscale=False,
        red_bias=0.0, green_bias=0.0, blue_bias=0.0, gray_bias=0.0,
        image_scale=1.0, class_labels=None, predicted_feature_name=None,
        custom_layers=None):
    """
    Convert a Keras model to a Core ML Model.

    model - a Keras model, a saved-model file path, or a
            (json_path, weights_path) tuple to convert
    input_names - names of input layers. Default None
    output_names - names of output layers. Default None
    image_input_names - a list of input names that are image datatypes
    image_output_names - a list of output names that are image datatypes
    deprocessing_args - a dictionary of arguments for input preprocessing
    class_labels - Class labels for outputs,
    predicted_feature_name - name for predicted features,
    custom_layers - a dictionary of custom layer conversions. Keys are
    Keras layer classes, values are coreml layer functions

    Returns: mlmodel - a coreml model object.
    """
    # Mutable-default fix: the original used `[]`/`{}` defaults, which are
    # shared across calls; normalize the None sentinels here instead.
    if image_input_names is None:
        image_input_names = []
    if image_output_names is None:
        image_output_names = []
    if deprocessing_args is None:
        deprocessing_args = {}

    if isinstance(model, basestring):
        model = _keras.models.load_model(model)
    elif isinstance(model, tuple):
        model = _load_keras_model(model[0], model[1])

    # Merge the custom layers with the Keras layer registry
    supported_layers = {}
    supported_layers.update(_KERAS_LAYER_REGISTRY)
    if custom_layers:
        supported_layers.update(custom_layers)

    # Check valid versions
    cls._check_unsupported_layers(model, supported_layers)

    # Build network graph to represent Keras model
    graph = _topology2.NetGraph(model)
    graph.build()
    graph.remove_skip_layers(_KERAS_SKIP_LAYERS)
    graph.insert_1d_permute_layers()
    graph.insert_permute_for_spatial_bn()
    graph.defuse_activation()
    graph.remove_internal_input_layers()
    graph.make_output_layers()
    # The graph should be finalized before executing this
    graph.generate_blob_names()
    graph.add_recurrent_optionals()

    inputs = graph.get_input_layers()
    outputs = graph.get_output_layers()

    # check input / output names validity
    if input_names is not None:
        if isinstance(input_names, basestring):
            input_names = [input_names]
    else:
        input_names = ['input' + str(i + 1) for i in range(len(inputs))]
    if output_names is not None:
        if isinstance(output_names, basestring):
            output_names = [output_names]
    else:
        output_names = ['output' + str(i + 1) for i in range(len(outputs))]
    if (image_input_names is not None and
            isinstance(image_input_names, basestring)):
        image_input_names = [image_input_names]

    graph.reset_model_input_names(input_names)
    graph.reset_model_output_names(output_names)

    # Keras -> Core ML input dimension dictionary
    # (None, None) -> [1, 1, 1, 1, 1]
    # (None, D) -> [D] or [D, 1, 1, 1, 1]
    # (None, Seq, D) -> [Seq, 1, D, 1, 1]
    # (None, H, W, C) -> [C, H, W]
    # (D) -> [D]
    # (Seq, D) -> [Seq, 1, 1, D, 1]
    # (Batch, Sequence, D) -> [D]

    # Retrieve input shapes from model (filter(None, ...) drops the
    # None batch/sequence placeholders; list-valued under Python 2).
    if type(model.input_shape) is list:
        input_dims = [filter(None, x) for x in model.input_shape]
        unfiltered_shapes = model.input_shape
    else:
        input_dims = [filter(None, model.input_shape)]
        unfiltered_shapes = [model.input_shape]

    for idx, dim in enumerate(input_dims):
        unfiltered_shape = unfiltered_shapes[idx]
        if len(dim) == 0:
            # Used to be [None, None] before filtering; indicating
            # unknown sequence length
            input_dims[idx] = tuple([1])
        elif len(dim) == 1:
            s = graph.get_successors(inputs[idx])[0]
            if isinstance(graph.get_keras_layer(s),
                          _keras.layers.embeddings.Embedding):
                # Embedding layer's special input (None, D) where D is
                # actually sequence length
                input_dims[idx] = (1,)
            else:
                input_dims[idx] = dim  # dim is just a number
        elif len(dim) == 2:  # [Seq, D]
            input_dims[idx] = (dim[1],)
        elif len(dim) == 3:  # H,W,C
            if (len(unfiltered_shape) > 3):
                # keras uses the reverse notation from us
                input_dims[idx] = (dim[2], dim[0], dim[1])
            else:
                # keras provided fixed batch and sequence length, so
                # the input was (batch, sequence, channel)
                input_dims[idx] = (dim[2],)
        else:
            # Message fix: the original concatenated words without
            # spaces ('Input' + name + 'has input shape of length').
            raise ValueError(
                'Input %s has input shape of length %d'
                % (input_names[idx], len(dim)))

    # Retrieve output shapes from model
    if type(model.output_shape) is list:
        output_dims = [filter(None, x) for x in model.output_shape]
    else:
        output_dims = [filter(None, model.output_shape[1:])]

    for idx, dim in enumerate(output_dims):
        if len(dim) == 1:
            output_dims[idx] = dim
        elif len(dim) == 2:  # [Seq, D]
            output_dims[idx] = (dim[1],)
        elif len(dim) == 3:
            output_dims[idx] = (dim[2], dim[1], dim[0])

    input_types = [datatypes.Array(*dim) for dim in input_dims]
    output_types = [datatypes.Array(*dim) for dim in output_dims]

    # Some of the feature handling is sensitive about string vs. unicode
    input_names = map(str, input_names)
    output_names = map(str, output_names)
    is_classifier = class_labels is not None
    if is_classifier:
        mode = 'classifier'
    else:
        mode = None

    # assuming these match
    _output_names = list(output_names)
    input_features = zip(input_names, input_types)
    output_features = zip(_output_names, output_types)

    builder = _NeuralNetworkBuilder(
        input_features, output_features, mode=mode
    )

    for layer_idx, layer in enumerate(graph.layer_list):
        keras_layer = graph.keras_layer_map[layer]
        print("%d : %s, %s" % (layer_idx, layer, keras_layer))
        if isinstance(keras_layer, _keras.layers.wrappers.TimeDistributed):
            keras_layer = keras_layer.layer
        converter_func = cls._get_layer_converter_fn(
            keras_layer, supported_layers
        )
        # Bug fix: the original rebound `input_names, output_names` here,
        # clobbering the model-level name lists consumed by
        # builder.set_input / set_output after the loop.
        layer_inputs, layer_outputs = graph.get_layer_blobs(layer)
        converter_func(
            builder, layer, layer_inputs, layer_outputs, keras_layer
        )

    # Set the right inputs and outputs on the model description (interface)
    builder.set_input(input_names, input_dims)
    builder.set_output(output_names, output_dims)

    # Since we aren't mangling anything the user gave us, we only need to
    # update the model interface here
    builder.add_optionals(graph.optional_inputs, graph.optional_outputs)

    # Add classifier classes (if applicable)
    if is_classifier:
        classes_in = class_labels
        if isinstance(classes_in, basestring):
            import os
            if not os.path.isfile(classes_in):
                raise ValueError(
                    "Path to class labels (%s) does not exist."
                    % classes_in
                )
            with open(classes_in, 'r') as f:
                classes = f.read()
            classes = classes.splitlines()
        elif type(classes_in) is list:  # list[int or str]
            classes = classes_in
        else:
            raise ValueError(
                'Class labels must be a list of integers / '
                'strings, or a file path'
            )

        if predicted_feature_name is not None:
            builder.set_class_labels(
                classes, predicted_feature_name=predicted_feature_name
            )
        else:
            builder.set_class_labels(classes)

    # Set pre-processing parameters
    # NOTE(review): is_grayscale and deprocessing_args are accepted but
    # never forwarded, mirroring the original behavior.
    builder.set_pre_processing_parameters(
        image_input_names=image_input_names,
        is_bgr=is_bgr,
        red_bias=red_bias,
        green_bias=green_bias,
        blue_bias=blue_bias,
        gray_bias=gray_bias,
        image_scale=image_scale)

    # Convert the image outputs to actual image datatypes
    for output_name in output_names:
        if output_name in image_output_names:
            cls._convert_multiarray_output_to_image(
                builder.spec, output_name, is_bgr=is_bgr
            )

    # Return the protobuf spec
    spec = builder.spec
    return _MLModel(spec)
def main():
    """Load the PoseNet Core ML model and convert it to the IR format."""
    source_model = _MLModel('result/posenet257.mlmodel')
    parser = CoremlParser(source_model)
    parser.run('result/posenet257_ir')